diff sat/memory/sqlite.py @ 3028:ab2696e34d29

Python 3 port:

/!\ this is a huge commit /!\
Starting from this commit, SàT needs Python 3.6+.
SàT may be unstable, and some features may not work anymore; this will improve with time.

This patch ports backend, bridge and frontends to Python 3. Roughly, this has been done this way:
- the 2to3 tool has been applied (with python 3.7)
- all references to python2 have been replaced with python3 (notably shebangs)
- fixed files not handled by 2to3 (notably the shell script)
- several manual fixes
- fixed issues reported by Python 3 that were not handled in Python 2
- replaced "async" with "async_" where needed (it's a reserved word since Python 3.7)
- replaced zope's "implements" with the @implementer decorator
- temporary hack to handle data pickled in the database, as str or bytes may be returned; to be checked later
- fixed hash comparison for passwords
- removed some code which is no longer needed with Python 3
- deactivated some code which needs to be checked (notably certificate validation)
- tested with jp, fixed reported issues until some basic commands worked
- ported Primitivus (after porting dependencies like urwid satext)
- more manual fixes
author Goffi <goffi@goffi.org>
date Tue, 13 Aug 2019 19:08:41 +0200
parents 860c550028d6
children f8cc88c773c8
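For illustration, here is a minimal standalone sketch of two of the mechanical changes listed above (the zope "implements" replacement and the "async" renaming); the interface and class names are hypothetical, not taken from the SàT codebase:

    from zope.interface import Interface, implementer

    class IHandler(Interface):
        """Hypothetical interface, for illustration only."""

    # Python 2 code used zope's class advice `implements(IHandler)` inside the
    # class body; under Python 3 the @implementer class decorator is required.
    @implementer(IHandler)
    class Handler(object):
        # `async` became a reserved word in Python 3.7, so arguments and
        # attributes with that name are renamed to `async_`.
        def send(self, data, async_=True):
            return (data, async_)
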
--- a/sat/memory/sqlite.py	Wed Jul 31 11:31:22 2019 +0200
+++ b/sat/memory/sqlite.py	Tue Aug 13 19:08:41 2019 +0200
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 
 # SAT: a jabber client
@@ -31,7 +31,7 @@
 import sys
 import re
 import os.path
-import cPickle as pickle
+import pickle as pickle
 import hashlib
 import sqlite3
 import json
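In Python 3 the C accelerator formerly exposed as cPickle is used by pickle automatically, so the "as pickle" alias left by 2to3 is redundant but harmless; a quick standalone check on CPython:

    import pickle

    # On CPython the fast C implementation (_pickle) is wired in transparently.
    print(pickle.Pickler.__module__)  # '_pickle'
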
@@ -152,12 +152,12 @@
             #        Sqlite integration, probably with high level library
             retry -= 1
             if retry == 0:
-                log.error(_(u'too many db tries, we abandon! Error message: {msg}\n'
-                            u'query was {query}'
-                            .format(msg=e, query=u' '.join([unicode(a) for a in args]))))
+                log.error(_('too many db tries, we abandon! Error message: {msg}\n'
+                            'query was {query}'
+                            .format(msg=e, query=' '.join([str(a) for a in args]))))
                 raise e
             log.warning(
-                _(u'exception while running query, retrying ({try_}): {msg}').format(
+                _('exception while running query, retrying ({try_}): {msg}').format(
                 try_ = 6 - retry,
                 msg = e))
             kw['query_retry'] = retry
@@ -175,14 +175,14 @@
             retry -= 1
             if retry == 0:
                 log.error(
-                    _(u'too many interaction tries, we abandon! Error message: {msg}\n'
-                      u'interaction method was: {interaction}\n'
-                      u'interaction arguments were: {args}'
+                    _('too many interaction tries, we abandon! Error message: {msg}\n'
+                      'interaction method was: {interaction}\n'
+                      'interaction arguments were: {args}'
                       .format(msg=e, interaction=interaction,
-                              args=u', '.join([unicode(a) for a in args]))))
+                              args=', '.join([str(a) for a in args]))))
                 raise e
             log.warning(
-                _(u'exception while running interaction, retrying ({try_}): {msg}')
+                _('exception while running interaction, retrying ({try_}): {msg}')
                 .format(try_ = 4 - retry, msg = e))
             kw['interaction_retry'] = retry
             return self._runInteraction(interaction, *args, **kw)
@@ -204,7 +204,7 @@
         if new_base:  # the dir may not exist if it's not the XDG recommended one
             dir_ = os.path.dirname(db_filename)
             if not os.path.exists(dir_):
-                os.makedirs(dir_, 0700)
+                os.makedirs(dir_, 0o700)
 
         def foreignKeysOn(sqlite):
             sqlite.execute('PRAGMA foreign_keys = ON')
@@ -240,7 +240,7 @@
 
         if statements is None:
             return defer.succeed(None)
-        log.debug(u"\n===== COMMITTING STATEMENTS =====\n%s\n============\n\n" % '\n'.join(statements))
+        log.debug("\n===== COMMITTING STATEMENTS =====\n%s\n============\n\n" % '\n'.join(statements))
         d = self.dbpool.runInteraction(self._updateDb, tuple(statements))
         return d
 
@@ -270,7 +270,7 @@
 
     def getProfilesList(self):
         """"Return list of all registered profiles"""
-        return self.profiles.keys()
+        return list(self.profiles.keys())
 
     def hasProfile(self, profile_name):
         """return True if profile_name exists
@@ -283,13 +283,13 @@
         try:
             return self.profiles[profile_name] in self.components
         except KeyError:
-            raise exceptions.NotFound(u"the requested profile doesn't exists")
+            raise exceptions.NotFound("the requested profile doesn't exists")
 
     def getEntryPoint(self, profile_name):
         try:
             return self.components[self.profiles[profile_name]]
         except KeyError:
-            raise exceptions.NotFound(u"the requested profile doesn't exists or is not a component")
+            raise exceptions.NotFound("the requested profile doesn't exists or is not a component")
 
     def createProfile(self, name, component=None):
         """Create a new profile
@@ -326,7 +326,7 @@
         @return: deferred triggered once profile is actually deleted
         """
         def deletionError(failure_):
-            log.error(_(u"Can't delete profile [%s]") % name)
+            log.error(_("Can't delete profile [%s]") % name)
             return failure_
 
         def delete(txn):
@@ -359,7 +359,7 @@
             for param in result:
                 category, name, value = param
                 params_gen[(category, name)] = value
-        log.debug(_(u"loading general parameters from database"))
+        log.debug(_("loading general parameters from database"))
         return self.dbpool.runQuery("SELECT category,name,value FROM param_gen").addCallback(fillParams)
 
     def loadIndParams(self, params_ind, profile):
@@ -374,7 +374,7 @@
             for param in result:
                 category, name, value = param
                 params_ind[(category, name)] = value
-        log.debug(_(u"loading individual parameters from database"))
+        log.debug(_("loading individual parameters from database"))
         d = self.dbpool.runQuery("SELECT category,name,value FROM param_ind WHERE profile_id=?", (self.profiles[profile], ))
         d.addCallback(fillParams)
         return d
@@ -399,7 +399,7 @@
         @param value: value to set
         @return: deferred"""
         d = self.dbpool.runQuery("REPLACE INTO param_gen(category,name,value) VALUES (?,?,?)", (category, name, value))
-        d.addErrback(lambda ignore: log.error(_(u"Can't set general parameter (%(category)s/%(name)s) in database" % {"category": category, "name": name})))
+        d.addErrback(lambda ignore: log.error(_("Can't set general parameter (%(category)s/%(name)s) in database" % {"category": category, "name": name})))
         return d
 
     def setIndParam(self, category, name, value, profile):
@@ -412,7 +412,7 @@
         @return: deferred
         """
         d = self.dbpool.runQuery("REPLACE INTO param_ind(category,name,profile_id,value) VALUES (?,?,?,?)", (category, name, self.profiles[profile], value))
-        d.addErrback(lambda ignore: log.error(_(u"Can't set individual parameter (%(category)s/%(name)s) for [%(profile)s] in database" % {"category": category, "name": name, "profile": profile})))
+        d.addErrback(lambda ignore: log.error(_("Can't set individual parameter (%(category)s/%(name)s) for [%(profile)s] in database" % {"category": category, "name": name, "profile": profile})))
         return d
 
     ## History
@@ -423,14 +423,14 @@
         uid = data['uid']
         d_list = []
         for key in ('message', 'subject'):
-            for lang, value in data[key].iteritems():
+            for lang, value in data[key].items():
                 d = self.dbpool.runQuery(
                     "INSERT INTO {key}(history_uid, {key}, language) VALUES (?,?,?)"
                     .format(key=key),
                     (uid, value, lang or None))
                 d.addErrback(lambda __: log.error(
-                    _(u"Can't save following {key} in history (uid: {uid}, lang:{lang}):"
-                      u" {value}").format(
+                    _("Can't save following {key} in history (uid: {uid}, lang:{lang}):"
+                      " {value}").format(
                     key=key, uid=uid, lang=lang, value=value)))
                 d_list.append(d)
         try:
@@ -443,8 +443,8 @@
                 "INSERT INTO thread(history_uid, thread_id, parent_id) VALUES (?,?,?)",
                 (uid, thread, thread_parent))
             d.addErrback(lambda __: log.error(
-                _(u"Can't save following thread in history (uid: {uid}): thread: "
-                  u"{thread}), parent:{parent}").format(
+                _("Can't save following thread in history (uid: {uid}): thread: "
+                  "{thread}), parent:{parent}").format(
                 uid=uid, thread=thread, parent=thread_parent)))
             d_list.append(d)
         return defer.DeferredList(d_list)
@@ -453,24 +453,24 @@
         failure_.trap(sqlite3.IntegrityError)
         sqlite_msg = failure_.value.args[0]
         if "UNIQUE constraint failed" in sqlite_msg:
-            log.debug(u"message {} is already in history, not storing it again"
+            log.debug("message {} is already in history, not storing it again"
                       .format(data['uid']))
             if 'received_timestamp' not in data:
                 log.warning(
-                    u"duplicate message is not delayed, this is maybe a bug: data={}"
+                    "duplicate message is not delayed, this is maybe a bug: data={}"
                     .format(data))
             # we cancel message to avoid sending duplicate message to frontends
             raise failure.Failure(exceptions.CancelError("Cancelled duplicated message"))
         else:
-            log.error(u"Can't store message in history: {}".format(failure_))
+            log.error("Can't store message in history: {}".format(failure_))
 
     def _logHistoryError(self, failure_, from_jid, to_jid, data):
         if failure_.check(exceptions.CancelError):
             # we propagate CancelError to avoid sending message to frontends
             raise failure_
         log.error(_(
-            u"Can't save following message in history: from [{from_jid}] to [{to_jid}] "
-            u"(uid: {uid})")
+            "Can't save following message in history: from [{from_jid}] to [{to_jid}] "
+            "(uid: {uid})")
             .format(from_jid=from_jid.full(), to_jid=to_jid.full(), uid=data['uid']))
 
     def addToHistory(self, data, profile):
@@ -478,14 +478,14 @@
 
         @param data(dict): message data as build by SatMessageProtocol.onMessage
         """
-        extra = pickle.dumps({k: v for k, v in data['extra'].iteritems()
+        extra = pickle.dumps({k: v for k, v in data['extra'].items()
                               if k not in NOT_IN_EXTRA}, 0)
         from_jid = data['from']
         to_jid = data['to']
         d = self.dbpool.runQuery(
-            u"INSERT INTO history(uid, stanza_id, update_uid, profile_id, source, dest, "
-            u"source_res, dest_res, timestamp, received_timestamp, type, extra) VALUES "
-            u"(?,?,?,?,?,?,?,?,?,?,?,?)",
+            "INSERT INTO history(uid, stanza_id, update_uid, profile_id, source, dest, "
+            "source_res, dest_res, timestamp, received_timestamp, type, extra) VALUES "
+            "(?,?,?,?,?,?,?,?,?,?,?,?)",
             (data['uid'], data['extra'].get('stanza_id'), data['extra'].get('update_uid'),
             self.profiles[profile], data['from'].userhost(), to_jid.userhost(),
             from_jid.resource, to_jid.resource, data['timestamp'],
@@ -508,7 +508,7 @@
             if uid != current['uid']:
                 # new message
                 try:
-                    extra = pickle.loads(str(extra or ""))
+                    extra = pickle.loads(extra or b"")
                 except EOFError:
                     extra = {}
                 current = {
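A quick standalone illustration of why the except EOFError fallback above keeps working after the port: unpickling an empty byte string raises EOFError in Python 3, just as the empty str did under Python 2:

    import pickle

    try:
        extra = pickle.loads(b"")   # empty input: "Ran out of input"
    except EOFError:
        extra = {}                  # same fallback as in the code above
    print(extra)                    # {}
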
@@ -543,8 +543,8 @@
             else:
                 if thread_parent is not None:
                     log.error(
-                        u"Database inconsistency: thread parent without thread (uid: "
-                        u"{uid}, thread_parent: {parent})"
+                        "Database inconsistency: thread parent without thread (uid: "
+                        "{uid}, thread_parent: {parent})"
                         .format(uid=uid, parent=thread_parent))
 
         return result
@@ -575,7 +575,7 @@
         if limit == 0:
             return defer.succeed([])
 
-        query_parts = [u"SELECT uid, stanza_id, update_uid, source, dest, source_res, dest_res, timestamp, received_timestamp,\
+        query_parts = ["SELECT uid, stanza_id, update_uid, source, dest, source_res, dest_res, timestamp, received_timestamp,\
                         type, extra, message, message.language, subject, subject.language, thread_id, thread.parent_id\
                         FROM history LEFT JOIN message ON history.uid = message.history_uid\
                         LEFT JOIN subject ON history.uid=subject.history_uid\
@@ -587,8 +587,8 @@
             values.append(jid_.userhost())
             if jid_.resource:
                 values.append(jid_.resource)
-                return u'({type_}=? AND {type_}_res=?)'.format(type_=type_)
-            return u'{type_}=?'.format(type_=type_)
+                return '({type_}=? AND {type_}_res=?)'.format(type_=type_)
+            return '{type_}=?'.format(type_=type_)
 
         if not from_jid and not to_jid:
             # not jid specified, we want all one2one communications
@@ -598,15 +598,15 @@
                 # we only have one jid specified, we check all messages
                 # from or to this jid
                 jid_ = from_jid or to_jid
-                query_parts.append(u"AND ({source} OR {dest})".format(
-                    source=test_jid(u'source', jid_),
-                    dest=test_jid(u'dest' , jid_)))
+                query_parts.append("AND ({source} OR {dest})".format(
+                    source=test_jid('source', jid_),
+                    dest=test_jid('dest' , jid_)))
             else:
                 # we have 2 jids specified, we check all communications between
                 # those 2 jids
                 query_parts.append(
-                    u"AND (({source_from} AND {dest_to}) "
-                    u"OR ({source_to} AND {dest_from}))".format(
+                    "AND (({source_from} AND {dest_to}) "
+                    "OR ({source_to} AND {dest_from}))".format(
                     source_from=test_jid('source', from_jid),
                     dest_to=test_jid('dest', to_jid),
                     source_to=test_jid('source', to_jid),
@@ -619,47 +619,47 @@
                 q.append(test_jid('source', from_jid))
             if to_jid is not None:
                 q.append(test_jid('dest', to_jid))
-            query_parts.append(u"AND " + u" AND ".join(q))
+            query_parts.append("AND " + " AND ".join(q))
 
         if filters:
-            if u'timestamp_start' in filters:
-                query_parts.append(u"AND timestamp>= ?")
-                values.append(float(filters[u'timestamp_start']))
-            if u'body' in filters:
+            if 'timestamp_start' in filters:
+                query_parts.append("AND timestamp>= ?")
+                values.append(float(filters['timestamp_start']))
+            if 'body' in filters:
                 # TODO: use REGEXP (function to be defined) instead of GLOB: https://www.sqlite.org/lang_expr.html
-                query_parts.append(u"AND message LIKE ?")
-                values.append(u"%{}%".format(filters['body']))
-            if u'search' in filters:
-                query_parts.append(u"AND (message LIKE ? OR source_res LIKE ?)")
-                values.extend([u"%{}%".format(filters['search'])] * 2)
-            if u'types' in filters:
+                query_parts.append("AND message LIKE ?")
+                values.append("%{}%".format(filters['body']))
+            if 'search' in filters:
+                query_parts.append("AND (message LIKE ? OR source_res LIKE ?)")
+                values.extend(["%{}%".format(filters['search'])] * 2)
+            if 'types' in filters:
                 types = filters['types'].split()
-                query_parts.append(u"AND type IN ({})".format(u','.join("?"*len(types))))
+                query_parts.append("AND type IN ({})".format(','.join("?"*len(types))))
                 values.extend(types)
-            if u'not_types' in filters:
+            if 'not_types' in filters:
                 types = filters['not_types'].split()
-                query_parts.append(u"AND type NOT IN ({})".format(u','.join("?"*len(types))))
+                query_parts.append("AND type NOT IN ({})".format(','.join("?"*len(types))))
                 values.extend(types)
-            if u'last_stanza_id' in filters:
+            if 'last_stanza_id' in filters:
                 # this request get the last message with a "stanza_id" that we
                 # have in history. This is mainly used to retrieve messages sent
                 # while we were offline, using MAM (XEP-0313).
-                if (filters[u'last_stanza_id'] is not True
+                if (filters['last_stanza_id'] is not True
                     or limit != 1):
-                    raise ValueError(u"Unexpected values for last_stanza_id filter")
-                query_parts.append(u"AND stanza_id IS NOT NULL")
+                    raise ValueError("Unexpected values for last_stanza_id filter")
+                query_parts.append("AND stanza_id IS NOT NULL")
 
 
         # timestamp may be identical for 2 close messages (specially when delay is
         # used) that's why we order ties by received_timestamp
         # We'll reverse the order in sqliteHistoryToList
         # we use DESC here so LIMIT keep the last messages
-        query_parts.append(u"ORDER BY timestamp DESC, history.received_timestamp DESC")
+        query_parts.append("ORDER BY timestamp DESC, history.received_timestamp DESC")
         if limit is not None:
-            query_parts.append(u"LIMIT ?")
+            query_parts.append("LIMIT ?")
             values.append(limit)
 
-        d = self.dbpool.runQuery(u" ".join(query_parts), values)
+        d = self.dbpool.runQuery(" ".join(query_parts), values)
         d.addCallback(self.sqliteHistoryToList)
         d.addCallback(self.listDict2listTuple)
         return d
@@ -668,32 +668,41 @@
 
     def _privateDataEb(self, failure_, operation, namespace, key=None, profile=None):
         """generic errback for data queries"""
-        log.error(_(u"Can't {operation} data in database for namespace {namespace}{and_key}{for_profile}: {msg}").format(
+        log.error(_("Can't {operation} data in database for namespace {namespace}{and_key}{for_profile}: {msg}").format(
             operation = operation,
             namespace = namespace,
-            and_key = (u" and key " + key) if key is not None else u"",
-            for_profile = (u' [' + profile + u']') if profile is not None else u'',
+            and_key = (" and key " + key) if key is not None else "",
+            for_profile = (' [' + profile + ']') if profile is not None else '',
             msg = failure_))
 
+    def _load_pickle(self, v):
+        # FIXME: workaround for Python 3 port, some pickled data are byte while other are strings
+        try:
+            return pickle.loads(v)
+        except TypeError:
+            data = pickle.loads(v.encode('utf-8'))
+            log.warning(f"encoding issue in pickled data: {data}")
+            return data
+
     def _generateDataDict(self, query_result, binary):
         if binary:
-            return {k: pickle.loads(str(v)) for k,v in query_result}
+            return {k: self._load_pickle(v) for k,v in query_result}
         else:
             return dict(query_result)
 
     def _getPrivateTable(self, binary, profile):
         """Get table to use for private values"""
-        table = [u'private']
+        table = ['private']
 
         if profile is None:
-            table.append(u'gen')
+            table.append('gen')
         else:
-            table.append(u'ind')
+            table.append('ind')
 
         if binary:
-            table.append(u'bin')
+            table.append('bin')
 
-        return u'_'.join(table)
+        return '_'.join(table)
 
     def getPrivates(self, namespace, keys=None, binary=False, profile=None):
         """Get private value(s) from databases
@@ -706,27 +715,27 @@
             None to use general values
         @return (dict[unicode, object]): gotten keys/values
         """
-        log.debug(_(u"getting {type}{binary} private values from database for namespace {namespace}{keys}".format(
-            type = u"general" if profile is None else "individual",
-            binary = u" binary" if binary else u"",
+        log.debug(_("getting {type}{binary} private values from database for namespace {namespace}{keys}".format(
+            type = "general" if profile is None else "individual",
+            binary = " binary" if binary else "",
             namespace = namespace,
-            keys = u" with keys {}".format(u", ".join(keys)) if keys is not None else u"")))
+            keys = " with keys {}".format(", ".join(keys)) if keys is not None else "")))
         table = self._getPrivateTable(binary, profile)
-        query_parts = [u"SELECT key,value FROM", table, "WHERE namespace=?"]
+        query_parts = ["SELECT key,value FROM", table, "WHERE namespace=?"]
         args = [namespace]
 
         if keys is not None:
-            placeholders = u','.join(len(keys) * u'?')
-            query_parts.append(u'AND key IN (' + placeholders + u')')
+            placeholders = ','.join(len(keys) * '?')
+            query_parts.append('AND key IN (' + placeholders + ')')
             args.extend(keys)
 
         if profile is not None:
-            query_parts.append(u'AND profile_id=?')
+            query_parts.append('AND profile_id=?')
             args.append(self.profiles[profile])
 
-        d = self.dbpool.runQuery(u" ".join(query_parts), args)
+        d = self.dbpool.runQuery(" ".join(query_parts), args)
         d.addCallback(self._generateDataDict, binary)
-        d.addErrback(self._privateDataEb, u"get", namespace, profile=profile)
+        d.addErrback(self._privateDataEb, "get", namespace, profile=profile)
         return d
 
     def setPrivateValue(self, namespace, key, value, binary=False, profile=None):
@@ -741,7 +750,7 @@
             if None, it's a general value
         """
         table = self._getPrivateTable(binary, profile)
-        query_values_names = [u'namespace', u'key', u'value']
+        query_values_names = ['namespace', 'key', 'value']
         query_values = [namespace, key]
 
         if binary:
@@ -750,14 +759,14 @@
         query_values.append(value)
 
         if profile is not None:
-            query_values_names.append(u'profile_id')
+            query_values_names.append('profile_id')
             query_values.append(self.profiles[profile])
 
-        query_parts = [u"REPLACE INTO", table, u'(', u','.join(query_values_names), u')',
-                       u"VALUES (", u",".join(u'?'*len(query_values_names)), u')']
+        query_parts = ["REPLACE INTO", table, '(', ','.join(query_values_names), ')',
+                       "VALUES (", ",".join('?'*len(query_values_names)), ')']
 
-        d = self.dbpool.runQuery(u" ".join(query_parts), query_values)
-        d.addErrback(self._privateDataEb, u"set", namespace, key, profile=profile)
+        d = self.dbpool.runQuery(" ".join(query_parts), query_values)
+        d.addErrback(self._privateDataEb, "set", namespace, key, profile=profile)
         return d
 
     def delPrivateValue(self, namespace, key, binary=False, profile=None):
@@ -770,13 +779,13 @@
             if None, it's a general value
         """
         table = self._getPrivateTable(binary, profile)
-        query_parts = [u"DELETE FROM", table, u"WHERE namespace=? AND key=?"]
+        query_parts = ["DELETE FROM", table, "WHERE namespace=? AND key=?"]
         args = [namespace, key]
         if profile is not None:
-            query_parts.append(u"AND profile_id=?")
+            query_parts.append("AND profile_id=?")
             args.append(self.profiles[profile])
-        d = self.dbpool.runQuery(u" ".join(query_parts), args)
-        d.addErrback(self._privateDataEb, u"delete", namespace, key, profile=profile)
+        d = self.dbpool.runQuery(" ".join(query_parts), args)
+        d.addErrback(self._privateDataEb, "delete", namespace, key, profile=profile)
         return d
 
     def delPrivateNamespace(self, namespace, binary=False, profile=None):
@@ -787,19 +796,19 @@
         Params are the same as for delPrivateValue
         """
         table = self._getPrivateTable(binary, profile)
-        query_parts = [u"DELETE FROM", table, u"WHERE namespace=?"]
+        query_parts = ["DELETE FROM", table, "WHERE namespace=?"]
         args = [namespace]
         if profile is not None:
-            query_parts.append(u"AND profile_id=?")
+            query_parts.append("AND profile_id=?")
             args.append(self.profiles[profile])
-        d = self.dbpool.runQuery(u" ".join(query_parts), args)
-        d.addErrback(self._privateDataEb, u"delete namespace", namespace, profile=profile)
+        d = self.dbpool.runQuery(" ".join(query_parts), args)
+        d.addErrback(self._privateDataEb, "delete namespace", namespace, profile=profile)
         return d
 
     ## Files
 
     @defer.inlineCallbacks
-    def getFiles(self, client, file_id=None, version=u'', parent=None, type_=None,
+    def getFiles(self, client, file_id=None, version='', parent=None, type_=None,
                  file_hash=None, hash_algo=None, name=None, namespace=None, mime_type=None,
                  owner=None, access=None, projection=None, unique=False):
         """retrieve files with with given filters
@@ -831,45 +840,45 @@
         args = [self.profiles[client.profile]]
 
         if file_id is not None:
-            filters.append(u'id=?')
+            filters.append('id=?')
             args.append(file_id)
         if version is not None:
-            filters.append(u'version=?')
+            filters.append('version=?')
             args.append(version)
         if parent is not None:
-            filters.append(u'parent=?')
+            filters.append('parent=?')
             args.append(parent)
         if type_ is not None:
-            filters.append(u'type=?')
+            filters.append('type=?')
             args.append(type_)
         if file_hash is not None:
-            filters.append(u'file_hash=?')
+            filters.append('file_hash=?')
             args.append(file_hash)
         if hash_algo is not None:
-            filters.append(u'hash_algo=?')
+            filters.append('hash_algo=?')
             args.append(hash_algo)
         if name is not None:
-            filters.append(u'name=?')
+            filters.append('name=?')
             args.append(name)
         if namespace is not None:
-            filters.append(u'namespace=?')
+            filters.append('namespace=?')
             args.append(namespace)
         if mime_type is not None:
-            filters.append(u'mime_type=?')
+            filters.append('mime_type=?')
             args.append(mime_type)
         if owner is not None:
-            filters.append(u'owner=?')
+            filters.append('owner=?')
             args.append(owner.full())
         if access is not None:
             raise NotImplementedError('Access check is not implemented yet')
             # a JSON comparison is needed here
 
-        filters = u' AND '.join(filters)
+        filters = ' AND '.join(filters)
         query_parts.append(filters)
-        query = u' '.join(query_parts)
+        query = ' '.join(query_parts)
 
         result = yield self.dbpool.runQuery(query, args)
-        files_data = [dict(zip(projection, row)) for row in result]
+        files_data = [dict(list(zip(projection, row))) for row in result]
         to_parse = {'access', 'extra'}.intersection(projection)
         to_filter = {'owner'}.intersection(projection)
         if to_parse or to_filter:
@@ -882,7 +891,7 @@
                     file_data['owner'] = jid.JID(owner)
         defer.returnValue(files_data)
 
-    def setFile(self, client, name, file_id, version=u'', parent=None, type_=C.FILE_TYPE_FILE,
+    def setFile(self, client, name, file_id, version='', parent=None, type_=C.FILE_TYPE_FILE,
                 file_hash=None, hash_algo=None, size=None, namespace=None, mime_type=None,
                 created=None, modified=None, owner=None, access=None, extra=None):
         """set a file metadata
@@ -921,12 +930,12 @@
                                          json.dumps(access) if access else None,
                                          json.dumps(extra) if extra else None,
                                          self.profiles[client.profile]))
-        d.addErrback(lambda failure: log.error(_(u"Can't save file metadata for [{profile}]: {reason}".format(profile=client.profile, reason=failure))))
+        d.addErrback(lambda failure: log.error(_("Can't save file metadata for [{profile}]: {reason}".format(profile=client.profile, reason=failure))))
         return d
 
     def _fileUpdate(self, cursor, file_id, column, update_cb):
         query = 'SELECT {column} FROM files where id=?'.format(column=column)
-        for i in xrange(5):
+        for i in range(5):
             cursor.execute(query, [file_id])
             try:
                 older_value_raw = cursor.fetchone()[0]
@@ -951,9 +960,9 @@
             else:
                 if cursor.rowcount == 1:
                     break;
-            log.warning(_(u"table not updated, probably due to race condition, trying again ({tries})").format(tries=i+1))
+            log.warning(_("table not updated, probably due to race condition, trying again ({tries})").format(tries=i+1))
         else:
-            log.error(_(u"Can't update file table"))
+            log.error(_("Can't update file table"))
 
     def fileUpdate(self, file_id, column, update_cb):
         """Update a column value using a method to avoid race conditions
@@ -1072,17 +1081,17 @@
                     update_raw = yield self.update2raw(update_data, True)
                     defer.returnValue(update_raw)
                 else:
-                    log.error(_(u"schema version is up-to-date, but local schema differ from expected current schema"))
+                    log.error(_("schema version is up-to-date, but local schema differ from expected current schema"))
                     update_data = self.generateUpdateData(local_sch, current_sch, True)
                     update_raw = yield self.update2raw(update_data)
-                    log.warning(_(u"Here are the commands that should fix the situation, use at your own risk (do a backup before modifying database), you can go to SàT's MUC room at sat@chat.jabberfr.org for help\n### SQL###\n%s\n### END SQL ###\n") % u'\n'.join("%s;" % statement for statement in update_raw))
+                    log.warning(_("Here are the commands that should fix the situation, use at your own risk (do a backup before modifying database), you can go to SàT's MUC room at sat@chat.jabberfr.org for help\n### SQL###\n%s\n### END SQL ###\n") % '\n'.join("%s;" % statement for statement in update_raw))
                     raise exceptions.DatabaseError("Database mismatch")
             else:
                 if local_version > CURRENT_DB_VERSION:
                     log.error(_(
-                        u"You database version is higher than the one used in this SàT "
-                        u"version, are you using several version at the same time? We "
-                        u"can't run SàT with this database."))
+                        "You database version is higher than the one used in this SàT "
+                        "version, are you using several version at the same time? We "
+                        "can't run SàT with this database."))
                     sys.exit(1)
 
                 # Database is not up-to-date, we'll do the update
@@ -1091,7 +1100,7 @@
                 else:
                     log.info(_("Database schema has changed, local database will be updated"))
                 update_raw = []
-                for version in xrange(local_version + 1, CURRENT_DB_VERSION + 1):
+                for version in range(local_version + 1, CURRENT_DB_VERSION + 1):
                     try:
                         update_data = DATABASE_SCHEMAS[version]
                     except KeyError:
@@ -1150,17 +1159,17 @@
         ret = []
         assert isinstance(data, tuple)
         for table, col_data in data:
-            assert isinstance(table, basestring)
+            assert isinstance(table, str)
             assert isinstance(col_data, tuple)
             for cols in col_data:
                 if isinstance(cols, tuple):
-                    assert all([isinstance(c, basestring) for c in cols])
-                    indexed_cols = u','.join(cols)
-                elif isinstance(cols, basestring):
+                    assert all([isinstance(c, str) for c in cols])
+                    indexed_cols = ','.join(cols)
+                elif isinstance(cols, str):
                     indexed_cols = cols
                 else:
-                    raise exceptions.InternalError(u"unexpected index columns value")
-                index_name = table + u'__' + indexed_cols.replace(u',', u'_')
+                    raise exceptions.InternalError("unexpected index columns value")
+                index_name = table + '__' + indexed_cols.replace(',', '_')
                 ret.append(Updater.INDEX_SQL % (index_name, table, indexed_cols))
         return ret
 
@@ -1173,7 +1182,7 @@
         @return: hash as string
         """
         hash_ = hashlib.sha1()
-        tables = data.keys()
+        tables = list(data.keys())
         tables.sort()
 
         def stmnts2str(stmts):
@@ -1181,7 +1190,9 @@
 
         for table in tables:
             col_defs, col_constr = data[table]
-            hash_.update("%s:%s:%s" % (table, stmnts2str(col_defs), stmnts2str(col_constr)))
+            hash_.update(
+                ("%s:%s:%s" % (table, stmnts2str(col_defs), stmnts2str(col_constr)))
+                .encode('utf-8'))
         return hash_.digest()
 
     def rawStatements2data(self, raw_statements):
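The .encode('utf-8') added above reflects a general Python 3 rule: hashlib digests consume bytes only. A quick standalone check:

    import hashlib

    h = hashlib.sha1()
    try:
        h.update("history:uid TEXT")             # Python 3: TypeError
    except TypeError:
        h.update("history:uid TEXT".encode('utf-8'))
    print(h.hexdigest())
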
@@ -1324,7 +1335,7 @@
 
     def update_v8(self):
         """Update database from v7 to v8 (primary keys order changes + indexes)"""
-        log.info(u"Database update to v8")
+        log.info("Database update to v8")
         statements = ["PRAGMA foreign_keys = OFF"]
 
         # here is a copy of create and index data, we can't use "current" table
@@ -1357,11 +1368,11 @@
             schema = {table: create[table]}
             cols = [d.split()[0] for d in schema[table][0]]
             statements.extend(Updater.createData2Raw(schema))
-            statements.append(u"INSERT INTO {table}({cols}) "
-                              u"SELECT {cols} FROM {table}_old".format(
+            statements.append("INSERT INTO {table}({cols}) "
+                              "SELECT {cols} FROM {table}_old".format(
                               table=table,
-                              cols=u','.join(cols)))
-            statements.append(u"DROP TABLE {}_old".format(table))
+                              cols=','.join(cols)))
+            statements.append("DROP TABLE {}_old".format(table))
 
         statements.extend(Updater.indexData2Raw(index))
         statements.append("PRAGMA foreign_keys = ON")
@@ -1370,48 +1381,48 @@
     @defer.inlineCallbacks
     def update_v7(self):
         """Update database from v6 to v7 (history unique constraint change)"""
-        log.info(u"Database update to v7, this may be long depending on your history "
-                 u"size, please be patient.")
+        log.info("Database update to v7, this may be long depending on your history "
+                 "size, please be patient.")
 
-        log.info(u"Some cleaning first")
+        log.info("Some cleaning first")
         # we need to fix duplicate stanza_id, as it can result in conflicts with the new schema
         # normally database should not contain any, but better safe than sorry.
         rows = yield self.dbpool.runQuery(
-            u"SELECT stanza_id, COUNT(*) as c FROM history WHERE stanza_id is not NULL "
-            u"GROUP BY stanza_id HAVING c>1")
+            "SELECT stanza_id, COUNT(*) as c FROM history WHERE stanza_id is not NULL "
+            "GROUP BY stanza_id HAVING c>1")
         if rows:
             count = sum([r[1] for r in rows]) - len(rows)
-            log.info(u"{count} duplicate stanzas found, cleaning".format(count=count))
+            log.info("{count} duplicate stanzas found, cleaning".format(count=count))
             for stanza_id, count in rows:
-                log.info(u"cleaning duplicate stanza {stanza_id}".format(stanza_id=stanza_id))
+                log.info("cleaning duplicate stanza {stanza_id}".format(stanza_id=stanza_id))
                 row_uids = yield self.dbpool.runQuery(
                     "SELECT uid FROM history WHERE stanza_id = ? LIMIT ?",
                     (stanza_id, count-1))
                 uids = [r[0] for r in row_uids]
                 yield self.dbpool.runQuery(
-                    "DELETE FROM history WHERE uid IN ({})".format(u",".join(u"?"*len(uids))),
+                    "DELETE FROM history WHERE uid IN ({})".format(",".join("?"*len(uids))),
                     uids)
 
         def deleteInfo(txn):
             # with foreign_keys on, the delete takes ages, so we deactivate it here
             # the time to delete info messages from history.
             txn.execute("PRAGMA foreign_keys = OFF")
-            txn.execute(u"DELETE FROM message WHERE history_uid IN (SELECT uid FROM history WHERE "
-                        u"type='info')")
-            txn.execute(u"DELETE FROM subject WHERE history_uid IN (SELECT uid FROM history WHERE "
-                        u"type='info')")
-            txn.execute(u"DELETE FROM thread WHERE history_uid IN (SELECT uid FROM history WHERE "
-                        u"type='info')")
-            txn.execute(u"DELETE FROM message WHERE history_uid IN (SELECT uid FROM history WHERE "
-                        u"type='info')")
-            txn.execute(u"DELETE FROM history WHERE type='info'")
+            txn.execute("DELETE FROM message WHERE history_uid IN (SELECT uid FROM history WHERE "
+                        "type='info')")
+            txn.execute("DELETE FROM subject WHERE history_uid IN (SELECT uid FROM history WHERE "
+                        "type='info')")
+            txn.execute("DELETE FROM thread WHERE history_uid IN (SELECT uid FROM history WHERE "
+                        "type='info')")
+            txn.execute("DELETE FROM message WHERE history_uid IN (SELECT uid FROM history WHERE "
+                        "type='info')")
+            txn.execute("DELETE FROM history WHERE type='info'")
             # not sure that is is necessary to reactivate here, but in doubt…
             txn.execute("PRAGMA foreign_keys = ON")
 
-        log.info(u'Deleting "info" messages (this can take a while)')
+        log.info('Deleting "info" messages (this can take a while)')
         yield self.dbpool.runInteraction(deleteInfo)
 
-        log.info(u"Cleaning done")
+        log.info("Cleaning done")
 
         # we have to rename table we will replace
         # tables referencing history need to be replaced to, else reference would
@@ -1423,68 +1434,68 @@
         yield self.dbpool.runQuery("ALTER TABLE thread RENAME TO thread_old")
 
         # history
-        query = (u"CREATE TABLE history (uid TEXT PRIMARY KEY, stanza_id TEXT, "
-                 u"update_uid TEXT, profile_id INTEGER, source TEXT, dest TEXT, "
-                 u"source_res TEXT, dest_res TEXT, timestamp DATETIME NOT NULL, "
-                 u"received_timestamp DATETIME, type TEXT, extra BLOB, "
-                 u"FOREIGN KEY(profile_id) REFERENCES profiles(id) ON DELETE CASCADE, "
-                 u"FOREIGN KEY(type) REFERENCES message_types(type), "
-                 u"UNIQUE (profile_id, stanza_id, source, dest))")
+        query = ("CREATE TABLE history (uid TEXT PRIMARY KEY, stanza_id TEXT, "
+                 "update_uid TEXT, profile_id INTEGER, source TEXT, dest TEXT, "
+                 "source_res TEXT, dest_res TEXT, timestamp DATETIME NOT NULL, "
+                 "received_timestamp DATETIME, type TEXT, extra BLOB, "
+                 "FOREIGN KEY(profile_id) REFERENCES profiles(id) ON DELETE CASCADE, "
+                 "FOREIGN KEY(type) REFERENCES message_types(type), "
+                 "UNIQUE (profile_id, stanza_id, source, dest))")
         yield self.dbpool.runQuery(query)
 
         # message
-        query = (u"CREATE TABLE message (id INTEGER PRIMARY KEY ASC, history_uid INTEGER"
-                 u", message TEXT, language TEXT, FOREIGN KEY(history_uid) REFERENCES "
-                 u"history(uid) ON DELETE CASCADE)")
+        query = ("CREATE TABLE message (id INTEGER PRIMARY KEY ASC, history_uid INTEGER"
+                 ", message TEXT, language TEXT, FOREIGN KEY(history_uid) REFERENCES "
+                 "history(uid) ON DELETE CASCADE)")
         yield self.dbpool.runQuery(query)
 
         # subject
-        query = (u"CREATE TABLE subject (id INTEGER PRIMARY KEY ASC, history_uid INTEGER"
-                 u", subject TEXT, language TEXT, FOREIGN KEY(history_uid) REFERENCES "
-                 u"history(uid) ON DELETE CASCADE)")
+        query = ("CREATE TABLE subject (id INTEGER PRIMARY KEY ASC, history_uid INTEGER"
+                 ", subject TEXT, language TEXT, FOREIGN KEY(history_uid) REFERENCES "
+                 "history(uid) ON DELETE CASCADE)")
         yield self.dbpool.runQuery(query)
 
         # thread
-        query = (u"CREATE TABLE thread (id INTEGER PRIMARY KEY ASC, history_uid INTEGER"
-                 u", thread_id TEXT, parent_id TEXT, FOREIGN KEY(history_uid) REFERENCES "
-                 u"history(uid) ON DELETE CASCADE)")
+        query = ("CREATE TABLE thread (id INTEGER PRIMARY KEY ASC, history_uid INTEGER"
+                 ", thread_id TEXT, parent_id TEXT, FOREIGN KEY(history_uid) REFERENCES "
+                 "history(uid) ON DELETE CASCADE)")
         yield self.dbpool.runQuery(query)
 
-        log.info(u"Now transfering old data to new tables, please be patient.")
+        log.info("Now transfering old data to new tables, please be patient.")
 
-        log.info(u"\nTransfering table history")
-        query = (u"INSERT INTO history (uid, stanza_id, update_uid, profile_id, source, "
-                 u"dest, source_res, dest_res, timestamp, received_timestamp, type, extra"
-                 u") SELECT uid, stanza_id, update_uid, profile_id, source, dest, "
-                 u"source_res, dest_res, timestamp, received_timestamp, type, extra "
-                 u"FROM history_old")
+        log.info("\nTransfering table history")
+        query = ("INSERT INTO history (uid, stanza_id, update_uid, profile_id, source, "
+                 "dest, source_res, dest_res, timestamp, received_timestamp, type, extra"
+                 ") SELECT uid, stanza_id, update_uid, profile_id, source, dest, "
+                 "source_res, dest_res, timestamp, received_timestamp, type, extra "
+                 "FROM history_old")
         yield self.dbpool.runQuery(query)
 
-        log.info(u"\nTransfering table message")
-        query = (u"INSERT INTO message (id, history_uid, message, language) SELECT id, "
-                 u"history_uid, message, language FROM message_old")
+        log.info("\nTransfering table message")
+        query = ("INSERT INTO message (id, history_uid, message, language) SELECT id, "
+                 "history_uid, message, language FROM message_old")
         yield self.dbpool.runQuery(query)
 
-        log.info(u"\nTransfering table subject")
-        query = (u"INSERT INTO subject (id, history_uid, subject, language) SELECT id, "
-                 u"history_uid, subject, language FROM subject_old")
+        log.info("\nTransfering table subject")
+        query = ("INSERT INTO subject (id, history_uid, subject, language) SELECT id, "
+                 "history_uid, subject, language FROM subject_old")
         yield self.dbpool.runQuery(query)
 
-        log.info(u"\nTransfering table thread")
-        query = (u"INSERT INTO thread (id, history_uid, thread_id, parent_id) SELECT id"
-                 u", history_uid, thread_id, parent_id FROM thread_old")
+        log.info("\nTransfering table thread")
+        query = ("INSERT INTO thread (id, history_uid, thread_id, parent_id) SELECT id"
+                 ", history_uid, thread_id, parent_id FROM thread_old")
         yield self.dbpool.runQuery(query)
 
-        log.info(u"\nRemoving old tables")
+        log.info("\nRemoving old tables")
         # because of foreign keys, tables referencing history_old
         # must be deleted first
         yield self.dbpool.runQuery("DROP TABLE thread_old")
         yield self.dbpool.runQuery("DROP TABLE subject_old")
         yield self.dbpool.runQuery("DROP TABLE message_old")
         yield self.dbpool.runQuery("DROP TABLE history_old")
-        log.info(u"\nReducing database size (this can take a while)")
+        log.info("\nReducing database size (this can take a while)")
         yield self.dbpool.runQuery("VACUUM")
-        log.info(u"Database update done :)")
+        log.info("Database update done :)")
 
     @defer.inlineCallbacks
     def update_v3(self):
@@ -1494,7 +1505,7 @@
         #      big database for tests. If issues are happening, we can cut it
         #      in smaller transactions using LIMIT and by deleting already updated
         #      messages
-        log.info(u"Database update to v3, this may take a while")
+        log.info("Database update to v3, this may take a while")
 
         # we need to fix duplicate timestamp, as it can result in conflicts with the new schema
         rows = yield self.dbpool.runQuery("SELECT timestamp, COUNT(*) as c FROM history GROUP BY timestamp HAVING c>1")
@@ -1506,10 +1517,10 @@
                 for idx, (id_,) in enumerate(ids_rows):
                     fixed.append(id_)
                     yield self.dbpool.runQuery("UPDATE history SET timestamp=? WHERE id=?", (float(timestamp) + idx * 0.001, id_))
-            log.info(u"fixed messages with ids {}".format(u', '.join([unicode(id_) for id_ in fixed])))
+            log.info("fixed messages with ids {}".format(', '.join([str(id_) for id_ in fixed])))
 
         def historySchema(txn):
-            log.info(u"History schema update")
+            log.info("History schema update")
             txn.execute("ALTER TABLE history RENAME TO tmp_sat_update")
             txn.execute("CREATE TABLE history (uid TEXT PRIMARY KEY, update_uid TEXT, profile_id INTEGER, source TEXT, dest TEXT, source_res TEXT, dest_res TEXT, timestamp DATETIME NOT NULL, received_timestamp DATETIME, type TEXT, extra BLOB, FOREIGN KEY(profile_id) REFERENCES profiles(id) ON DELETE CASCADE, FOREIGN KEY(type) REFERENCES message_types(type), UNIQUE (profile_id, timestamp, source, dest, source_res, dest_res))")
             txn.execute("INSERT INTO history (uid, profile_id, source, dest, source_res, dest_res, timestamp, type, extra) SELECT id, profile_id, source, dest, source_res, dest_res, timestamp, type, extra FROM tmp_sat_update")
@@ -1517,17 +1528,17 @@
         yield self.dbpool.runInteraction(historySchema)
 
         def newTables(txn):
-            log.info(u"Creating new tables")
+            log.info("Creating new tables")
             txn.execute("CREATE TABLE message (id INTEGER PRIMARY KEY ASC, history_uid INTEGER, message TEXT, language TEXT, FOREIGN KEY(history_uid) REFERENCES history(uid) ON DELETE CASCADE)")
             txn.execute("CREATE TABLE thread (id INTEGER PRIMARY KEY ASC, history_uid INTEGER, thread_id TEXT, parent_id TEXT, FOREIGN KEY(history_uid) REFERENCES history(uid) ON DELETE CASCADE)")
             txn.execute("CREATE TABLE subject (id INTEGER PRIMARY KEY ASC, history_uid INTEGER, subject TEXT, language TEXT, FOREIGN KEY(history_uid) REFERENCES history(uid) ON DELETE CASCADE)")
 
         yield self.dbpool.runInteraction(newTables)
 
-        log.info(u"inserting new message type")
+        log.info("inserting new message type")
         yield self.dbpool.runQuery("INSERT INTO message_types VALUES (?)", ('info',))
 
-        log.info(u"messages update")
+        log.info("messages update")
         rows = yield self.dbpool.runQuery("SELECT id, timestamp, message, extra FROM tmp_sat_update")
         total = len(rows)
 
@@ -1545,7 +1556,7 @@
             except EOFError:
                 extra = {}
             except Exception:
-                log.warning(u"Can't handle extra data for message id {}, ignoring it".format(id_))
+                log.warning("Can't handle extra data for message id {}, ignoring it".format(id_))
                 extra = {}
 
             queries.append(("INSERT INTO message(history_uid, message) VALUES (?,?)", (id_, message)))
@@ -1556,9 +1567,9 @@
                 pass
             else:
                 try:
-                    subject = subject.decode('utf-8')
+                    subject = subject
                 except UnicodeEncodeError:
-                    log.warning(u"Error while decoding subject, ignoring it")
+                    log.warning("Error while decoding subject, ignoring it")
                     del extra['subject']
                 else:
                     queries.append(("INSERT INTO subject(history_uid, subject) VALUES (?,?)", (id_, subject)))
@@ -1597,7 +1608,7 @@
                 try:
                     id_ = result[0][0]
                 except IndexError:
-                    log.error(u"Profile of id %d is referenced in 'param_ind' but it doesn't exist!" % profile_id)
+                    log.error("Profile of id %d is referenced in 'param_ind' but it doesn't exist!" % profile_id)
                     return defer.succeed(None)
 
                 sat_password = xmpp_password