Mercurial > libervia-backend
diff sat/plugins/plugin_blog_import_dotclear.py @ 3028:ab2696e34d29
Python 3 port:
/!\ this is a huge commit
/!\ starting from this commit, SàT needs Python 3.6+
/!\ SàT may be unstable or some features may not work anymore; this will improve with time
This patch port backend, bridge and frontends to Python 3.
Roughly this has been done this way:
- the 2to3 tool has been applied (with python 3.7)
- all references to python2 have been replaced with python3 (notably shebangs)
- fixed files not handled by 2to3 (notably the shell script)
- several manual fixes
- fixed issues reported by Python 3 that were not handled in Python 2
- replaced "async" with "async_" when needed (it's a reserved word from Python 3.7)
- replaced zope's "implements" with @implementer decorator
- temporary hack to handle data pickled in database, as str or bytes may be returned,
to be checked later
- fixed hash comparison for password
- removed some code which is not needed anymore with Python 3
- deactivated some code which needs to be checked (notably certificate validation)
- tested with jp, fixed reported issues until some basic commands worked
- ported Primitivus (after porting dependencies like urwid satext)
- more manual fixes
author | Goffi <goffi@goffi.org> |
---|---|
date | Tue, 13 Aug 2019 19:08:41 +0200 |
parents | 003b8b4b56a7 |
children | 9d0df638c8b4 |
line wrap: on
line diff
--- a/sat/plugins/plugin_blog_import_dotclear.py Wed Jul 31 11:31:22 2019 +0200 +++ b/sat/plugins/plugin_blog_import_dotclear.py Tue Aug 13 19:08:41 2019 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # SàT plugin for import external blogs @@ -42,10 +42,10 @@ C.PI_DESCRIPTION: _("""Blog importer for Dotclear blog engine."""), } -SHORT_DESC = D_(u"import posts from Dotclear blog engine") +SHORT_DESC = D_("import posts from Dotclear blog engine") LONG_DESC = D_( - u"""This importer handle Dotclear blog engine. + """This importer handle Dotclear blog engine. To use it, you'll need to export your blog to a flat file. You must go in your admin interface and select Plugins/Maintenance then Backup. @@ -55,7 +55,7 @@ location: you must use the absolute path to your backup for the location parameter """ ) -POST_ID_PREFIX = u"sat_dc_" +POST_ID_PREFIX = "sat_dc_" KNOWN_DATA_TYPES = ( "link", "setting", @@ -66,7 +66,7 @@ "comment", "captcha", ) -ESCAPE_MAP = {"r": u"\r", "n": u"\n", '"': u'"', "\\": u"\\"} +ESCAPE_MAP = {"r": "\r", "n": "\n", '"': '"', "\\": "\\"} class DotclearParser(object): @@ -83,7 +83,7 @@ @param post(dict): parsed post data @return (unicode): post unique item id """ - return u"{}_{}_{}_{}:{}".format( + return "{}_{}_{}_{}:{}".format( POST_ID_PREFIX, post["blog_id"], post["user_id"], @@ -99,7 +99,7 @@ """ post_id = comment["post_id"] parent_item_id = self.posts_data[post_id]["blog"]["id"] - return u"{}_comment_{}".format(parent_item_id, comment["comment_id"]) + return "{}_comment_{}".format(parent_item_id, comment["comment_id"]) def getTime(self, data, key): """Parse time as given by dotclear, with timezone handling @@ -125,18 +125,18 @@ if char == '"': # we have reached the end of this field, # we try to parse a new one - yield u"".join(buf) + yield "".join(buf) buf = [] idx += 1 try: separator = fields_data[idx] except IndexError: return - if separator != u",": + if separator != ",": raise 
exceptions.ParsingError("Field separator was expeceted") idx += 1 break # we have a new field - elif char == u"\\": + elif char == "\\": idx += 1 try: char = ESCAPE_MAP[fields_data[idx]] @@ -144,22 +144,22 @@ raise exceptions.ParsingError("Escaped char was expected") except KeyError: char = fields_data[idx] - log.warning(u"Unknown key to escape: {}".format(char)) + log.warning("Unknown key to escape: {}".format(char)) buf.append(char) def parseFields(self, headers, data): - return dict(itertools.izip(headers, self.readFields(data))) + return dict(zip(headers, self.readFields(data))) def postHandler(self, headers, data, index): post = self.parseFields(headers, data) - log.debug(u"({}) post found: {}".format(index, post["post_title"])) + log.debug("({}) post found: {}".format(index, post["post_title"])) mb_data = { "id": self.getPostId(post), "published": self.getTime(post, "post_creadt"), "updated": self.getTime(post, "post_upddt"), "author": post["user_id"], # there use info are not in the archive # TODO: option to specify user info - "content_xhtml": u"{}{}".format( + "content_xhtml": "{}{}".format( post["post_content_xhtml"], post["post_excerpt_xhtml"] ), "title": post["post_title"], @@ -168,7 +168,7 @@ self.posts_data[post["post_id"]] = { "blog": mb_data, "comments": [[]], - "url": u"/post/{}".format(post["post_url"]), + "url": "/post/{}".format(post["post_url"]), } def metaHandler(self, headers, data, index): @@ -178,7 +178,7 @@ tags.add(meta["meta_id"]) def metaFinishedHandler(self): - for post_id, tags in self.tags.iteritems(): + for post_id, tags in self.tags.items(): data_format.iter2dict("tag", tags, self.posts_data[post_id]["blog"]) del self.tags @@ -186,9 +186,9 @@ comment = self.parseFields(headers, data) if comment["comment_site"]: # we don't use atom:uri because it's used for jid in XMPP - content = u'{}\n<hr>\n<a href="{}">author website</a>'.format( + content = '{}\n<hr>\n<a href="{}">author website</a>'.format( comment["comment_content"], - 
cgi.escape(comment["comment_site"]).replace('"', u"%22"), + cgi.escape(comment["comment_site"]).replace('"', "%22"), ) else: content = comment["comment_content"] @@ -208,24 +208,24 @@ def parse(self, db_path): with open(db_path) as f: - signature = f.readline().decode("utf-8") + signature = f.readline() try: version = signature.split("|")[1] except IndexError: version = None - log.debug(u"Dotclear version: {}".format(version)) + log.debug("Dotclear version: {}".format(version)) data_type = None data_headers = None index = None while True: - buf = f.readline().decode("utf-8") + buf = f.readline() if not buf: break if buf.startswith("["): header = buf.split(" ", 1) data_type = header[0][1:] if data_type not in KNOWN_DATA_TYPES: - log.warning(u"unkown data type: {}".format(data_type)) + log.warning("unkown data type: {}".format(data_type)) index = 0 try: data_headers = header[1].split(",") @@ -233,7 +233,7 @@ last_header = data_headers[-1] data_headers[-1] = last_header[: last_header.rfind("]")] except IndexError: - log.warning(u"Can't read data)") + log.warning("Can't read data)") else: if data_type is None: continue @@ -247,7 +247,7 @@ pass else: finished_handler() - log.debug(u"{} data finished".format(data_type)) + log.debug("{} data finished".format(data_type)) data_type = None continue assert data_type @@ -258,7 +258,7 @@ else: fields_handler(data_headers, buf, index) index += 1 - return (self.posts_data.itervalues(), len(self.posts_data)) + return (iter(self.posts_data.values()), len(self.posts_data)) class DotclearImport(object): @@ -272,7 +272,7 @@ def DcImport(self, client, location, options=None): if not os.path.isabs(location): raise exceptions.DataError( - u"An absolute path to backup data need to be given as location" + "An absolute path to backup data need to be given as location" ) dc_parser = DotclearParser() d = threads.deferToThread(dc_parser.parse, location)