libervia-backend: diff sat/plugins/plugin_blog_import.py @ 3028:ab2696e34d29
Python 3 port:
/!\ this is a huge commit
/!\ starting from this commit, SàT needs Python 3.6+
/!\ SàT may be unstable or some features may not work anymore; this will improve with time
This patch ports the backend, bridge and frontends to Python 3.
Roughly this has been done this way:
- the 2to3 tool has been applied (with Python 3.7)
- all references to python2 have been replaced with python3 (notably shebangs)
- fixed files not handled by 2to3 (notably the shell script)
- several manual fixes
- fixed issues reported by Python 3 that were not handled in Python 2
- replaced "async" with "async_" when needed (it's a reserved word from Python 3.7)
- replaced zope's "implements" with @implementer decorator
- temporary hack to handle data pickled in the database, as str or bytes may be returned; to be checked later
- fixed hash comparison for passwords
- removed some code which is not needed anymore with Python 3
- deactivated some code which needs to be checked (notably certificate validation)
- tested with jp, fixed reported issues until some basic commands worked
- ported Primitivus (after porting dependencies like urwid satext)
- more manual fixes
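
To make two of the mechanical rewrites above concrete, here is a minimal illustrative sketch (hypothetical example code, not taken from the SàT sources): zope.interface's class-advice "implements()" call is replaced by the "@implementer" class decorator, and arguments named "async" are renamed to "async_" because "async" is a reserved keyword since Python 3.7.

```python
# Illustrative sketch only - hypothetical class, not actual SàT code.
from zope.interface import Interface, implementer


class IExample(Interface):
    """Hypothetical marker interface used for the example."""


# Python 2 code declared interfaces with class advice inside the class body:
#     class ExampleHandler(object):
#         implements(IExample)
# Under Python 3, zope.interface requires the decorator form instead:
@implementer(IExample)
class ExampleHandler(object):

    # "async" is a reserved keyword since Python 3.7, so parameters and
    # attributes named "async" had to be renamed, typically to "async_".
    def send(self, data, async_=False):
        return [data] if async_ else data


handler = ExampleHandler()
print(IExample.providedBy(handler))        # True
print(handler.send("hello", async_=True))  # ['hello']
```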
author | Goffi <goffi@goffi.org>
---|---
date | Tue, 13 Aug 2019 19:08:41 +0200
parents | 85d3240a400f
children | 9d0df638c8b4
--- a/sat/plugins/plugin_blog_import.py Wed Jul 31 11:31:22 2019 +0200
+++ b/sat/plugins/plugin_blog_import.py Tue Aug 13 19:08:41 2019 +0200
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-

 # SàT plugin for import external blogs
@@ -31,7 +31,7 @@
 import os
 import os.path
 import tempfile
-import urlparse
+import urllib.parse

 import shortuuid

@@ -43,7 +43,7 @@
     C.PI_MAIN: "BlogImportPlugin",
     C.PI_HANDLER: "no",
     C.PI_DESCRIPTION: _(
-        u"""Blog import management:
+        """Blog import management:
 This plugin manage the different blog importers which can register to it, and handle generic importing tasks."""
     ),
 }
@@ -67,7 +67,7 @@
         self._p = host.plugins["XEP-0060"]
         self._m = host.plugins["XEP-0277"]
         self._s = self.host.plugins["TEXT_SYNTAXES"]
-        host.plugins["IMPORT"].initialize(self, u"blog")
+        host.plugins["IMPORT"].initialize(self, "blog")

     def importItem(
         self, client, item_import_data, session, options, return_data, service, node
@@ -107,7 +107,7 @@
         try:
             item_id = mb_data["id"]
         except KeyError:
-            item_id = mb_data["id"] = unicode(shortuuid.uuid())
+            item_id = mb_data["id"] = str(shortuuid.uuid())

         try:
             # we keep the link between old url and new blog item
@@ -121,7 +121,7 @@
                 node or self._m.namespace,
                 item_id,
             )
-            log.info(u"url link from {old} to {new}".format(old=old_uri, new=new_uri))
+            log.info("url link from {old} to {new}".format(old=old_uri, new=new_uri))

         return mb_data

@@ -129,7 +129,7 @@
     def importSubItems(self, client, item_import_data, mb_data, session, options):
         # comments data
         if len(item_import_data["comments"]) != 1:
-            raise NotImplementedError(u"can't manage multiple comment links")
+            raise NotImplementedError("can't manage multiple comment links")
         allow_comments = C.bool(mb_data.get("allow_comments", C.BOOL_FALSE))
         if allow_comments:
             comments_service = yield self._m.getCommentsService(client)
@@ -145,13 +145,13 @@
         else:
             if item_import_data["comments"][0]:
                 raise exceptions.DataError(
-                    u"allow_comments set to False, but comments are there"
+                    "allow_comments set to False, but comments are there"
                 )
             defer.returnValue(None)

     def publishItem(self, client, mb_data, service, node, session):
         log.debug(
-            u"uploading item [{id}]: {title}".format(
+            "uploading item [{id}]: {title}".format(
                 id=mb_data["id"], title=mb_data.get("title", "")
             )
         )
@@ -182,7 +182,7 @@
         else:
             if "{}_xhtml".format(prefix) in mb_data:
                 raise exceptions.DataError(
-                    u"importer gave {prefix}_rich and {prefix}_xhtml at the same time, this is not allowed".format(
+                    "importer gave {prefix}_rich and {prefix}_xhtml at the same time, this is not allowed".format(
                         prefix=prefix
                     )
                 )
@@ -200,14 +200,14 @@
         else:
             if "{}_xhtml".format(prefix) in mb_data:
                 log.warning(
-                    u"{prefix}_text will be replaced by converted {prefix}_xhtml, so filters can be handled".format(
+                    "{prefix}_text will be replaced by converted {prefix}_xhtml, so filters can be handled".format(
                         prefix=prefix
                     )
                 )
                 del mb_data["{}_text".format(prefix)]
             else:
                 log.warning(
-                    u"importer gave a text {prefix}, blog filters don't work on text {prefix}".format(
+                    "importer gave a text {prefix}, blog filters don't work on text {prefix}".format(
                         prefix=prefix
                     )
                 )
@@ -225,8 +225,8 @@
         opt_host = options.get(OPT_HOST)
         if opt_host:
             # we normalise the domain
-            parsed_host = urlparse.urlsplit(opt_host)
-            opt_host = urlparse.urlunsplit(
+            parsed_host = urllib.parse.urlsplit(opt_host)
+            opt_host = urllib.parse.urlunsplit(
                 (
                     parsed_host.scheme or "http",
                     parsed_host.netloc or parsed_host.path,
@@ -239,7 +239,7 @@
         tmp_dir = tempfile.mkdtemp()
         try:
             # TODO: would be nice to also update the hyperlinks to these images, e.g. when you have <a href="{url}"><img src="{url}"></a>
-            for img_elt in xml_tools.findAll(top_elt, names=[u"img"]):
+            for img_elt in xml_tools.findAll(top_elt, names=["img"]):
                 yield self.imgFilters(client, img_elt, options, opt_host, tmp_dir)
         finally:
             os.rmdir(tmp_dir)  # XXX: tmp_dir should be empty, or something went wrong
@@ -260,21 +260,21 @@
         """
         try:
             url = img_elt["src"]
-            if url[0] == u"/":
+            if url[0] == "/":
                 if not opt_host:
                     log.warning(
-                        u"host was not specified, we can't deal with src without host ({url}) and have to ignore the following <img/>:\n{xml}".format(
+                        "host was not specified, we can't deal with src without host ({url}) and have to ignore the following <img/>:\n{xml}".format(
                             url=url, xml=img_elt.toXml()
                         )
                     )
                     return
                 else:
-                    url = urlparse.urljoin(opt_host, url)
+                    url = urllib.parse.urljoin(opt_host, url)
             filename = url.rsplit("/", 1)[-1].strip()
             if not filename:
                 raise KeyError
         except (KeyError, IndexError):
-            log.warning(u"ignoring invalid img element: {}".format(img_elt.toXml()))
+            log.warning("ignoring invalid img element: {}".format(img_elt.toXml()))
             return

         # we change the url for the normalized one
@@ -288,10 +288,10 @@
             pass
         else:
             # host is the ignored one, we skip
-            parsed_url = urlparse.urlsplit(url)
+            parsed_url = urllib.parse.urlsplit(url)
             if ignore_host in parsed_url.hostname:
                 log.info(
-                    u"Don't upload image at {url} because of {opt} option".format(
+                    "Don't upload image at {url} because of {opt} option".format(
                         url=url, opt=OPT_UPLOAD_IGNORE_HOST
                     )
                 )
@@ -304,7 +304,7 @@
         try:
             yield web_client.downloadPage(url.encode("utf-8"), tmp_file)
             filename = filename.replace(
-                u"%", u"_"
+                "%", "_"
             )  # FIXME: tmp workaround for a bug in prosody http upload
             __, download_d = yield self._u.upload(
                 client, tmp_file, filename, options=upload_options
@@ -312,7 +312,7 @@
             )
             download_url = yield download_d
         except Exception as e:
             log.warning(
-                u"can't download image at {url}: {reason}".format(url=url, reason=e)
+                "can't download image at {url}: {reason}".format(url=url, reason=e)
             )
         else:
             img_elt["src"] = download_url
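
For reference, the substitutions that account for most hunks above are plain standard-library renames. A minimal sketch (illustrative only, not part of the patch) of the Python 3 equivalents:

```python
# Python 2                         ->  Python 3
#   import urlparse                ->    import urllib.parse
#   urlparse.urlsplit/urlunsplit   ->    urllib.parse.urlsplit/urlunsplit
#   urlparse.urljoin               ->    urllib.parse.urljoin
#   unicode(value)                 ->    str(value)
#   u"literal"                     ->    "literal"  (str is unicode by default)
import urllib.parse

parts = urllib.parse.urlsplit("//example.org/blog")
# normalise the host the same way the plugin does for the OPT_HOST option
normalized = urllib.parse.urlunsplit(
    (parts.scheme or "http", parts.netloc or parts.path, "", "", "")
)
print(normalized)                                      # http://example.org
print(urllib.parse.urljoin(normalized, "/pic.png"))    # http://example.org/pic.png
print(str(42), "no more u-prefix")
```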