Mercurial > libervia-web
diff src/server/blog.py @ 1113:cdd389ef97bc
server: code style reformatting using black
author:   Goffi <goffi@goffi.org>
date:     Fri, 29 Jun 2018 17:45:26 +0200
parents:  f2170536ba23
children: (none)
line wrap: on
line diff
--- a/src/server/blog.py Sun Jun 24 22:21:25 2018 +0200 +++ b/src/server/blog.py Fri Jun 29 17:45:26 2018 +0200 @@ -21,6 +21,7 @@ from sat.core.i18n import _, D_ from sat_frontends.tools.strings import addURLToText, fixXHTMLLinks from sat.core.log import getLogger + log = getLogger(__name__) from sat.tools.common import data_format from sat.tools import xml_tools @@ -40,8 +41,13 @@ from libervia.server.html_tools import sanitizeHtml, convertNewLinesToXHTML from libervia.server.constants import Const as C -NS_ATOM = 'http://www.w3.org/2005/Atom' -PARAMS_TO_GET = (C.STATIC_BLOG_PARAM_TITLE, C.STATIC_BLOG_PARAM_BANNER, C.STATIC_BLOG_PARAM_KEYWORDS, C.STATIC_BLOG_PARAM_DESCRIPTION) +NS_ATOM = "http://www.w3.org/2005/Atom" +PARAMS_TO_GET = ( + C.STATIC_BLOG_PARAM_TITLE, + C.STATIC_BLOG_PARAM_BANNER, + C.STATIC_BLOG_PARAM_KEYWORDS, + C.STATIC_BLOG_PARAM_DESCRIPTION, +) re_strip_empty_div = re.compile(r"<div ?/>|<div> *?</div>") # TODO: check disco features and use max_items when RSM is not available @@ -58,7 +64,9 @@ """ default_query_data = {} try: - default_query_data['tag'] = request.extra_dict['mam_filter_{}'.format(C.MAM_FILTER_CATEGORY)].encode('utf-8') + default_query_data["tag"] = request.extra_dict[ + "mam_filter_{}".format(C.MAM_FILTER_CATEGORY) + ].encode("utf-8") except KeyError: pass return default_query_data @@ -70,7 +78,7 @@ @param value(unicode): value to quote @return (str): quoted value """ - return urllib.quote(value.encode('utf-8'), '') + return urllib.quote(value.encode("utf-8"), "") def _unquote(quoted_value): @@ -80,18 +88,22 @@ @return (unicode): unquoted value """ assert not isinstance(quoted_value, unicode) - return urllib.unquote(quoted_value).decode('utf-8') + return urllib.unquote(quoted_value).decode("utf-8") def _urlencode(query): """Same as urllib.urlencode, but use '&' instead of '&'""" - return '&'.join(["{}={}".format(urllib.quote_plus(str(k)), urllib.quote_plus(str(v))) - for k,v in query.iteritems()]) + return "&".join( + [ + 
"{}={}".format(urllib.quote_plus(str(k)), urllib.quote_plus(str(v))) + for k, v in query.iteritems() + ] + ) class TemplateProcessor(object): - THEME = 'default' + THEME = "default" def __init__(self, host): self.host = host @@ -102,16 +114,17 @@ self.env = Environment(loader=PackageLoader(themes, self.THEME)) def useTemplate(self, request, tpl, data=None): - theme_url = os.path.join('/', C.THEMES_URL, self.THEME) + theme_url = os.path.join("/", C.THEMES_URL, self.THEME) - data_ = {'images': os.path.join(theme_url, 'images'), - 'styles': os.path.join(theme_url, 'styles'), - } + data_ = { + "images": os.path.join(theme_url, "images"), + "styles": os.path.join(theme_url, "styles"), + } if data: data_.update(data) - template = self.env.get_template('{}.html'.format(tpl)) - return template.render(**data_).encode('utf-8') + template = self.env.get_template("{}.html".format(tpl)) + return template.render(**data_).encode("utf-8") class MicroBlog(Resource, TemplateProcessor): @@ -129,7 +142,6 @@ self.avatars_cache[bare_jid_s] = avatar_url return avatar_url - def getAvatarURL(self, pub_jid, request): """Return avatar of a jid if in cache, else ask for it. 
@@ -140,15 +152,21 @@ try: url = self.avatars_cache[bare_jid_s] except KeyError: - self.avatars_cache[bare_jid_s] = '' # avoid to request the vcard several times - d = self.host.bridgeCall('avatarGet', bare_jid_s, False, False, C.SERVICE_PROFILE) + self.avatars_cache[ + bare_jid_s + ] = "" # avoid to request the vcard several times + d = self.host.bridgeCall( + "avatarGet", bare_jid_s, False, False, C.SERVICE_PROFILE + ) d.addCallback(self._avatarPathToUrl, request, bare_jid_s) return d return defer.succeed(url if url else C.DEFAULT_AVATAR_URL) def render_GET(self, request): if not request.postpath or len(request.postpath) > 2: - return self.useTemplate(request, "static_blog_error", {'message': "You must indicate a nickname"}) + return self.useTemplate( + request, "static_blog_error", {"message": "You must indicate a nickname"} + ) prof_requested = _unquote(request.postpath[0]) @@ -157,25 +175,34 @@ except DBusException: prof_found = None if not prof_found or prof_found == C.SERVICE_PROFILE: - return self.useTemplate(request, "static_blog_error", {'message': "Invalid nickname"}) + return self.useTemplate( + request, "static_blog_error", {"message": "Invalid nickname"} + ) d = defer.Deferred() # TODO: jid caching - self.host.bridge.asyncGetParamA('JabberID', 'Connection', 'value', profile_key=prof_found, callback=d.callback, errback=d.errback) + self.host.bridge.asyncGetParamA( + "JabberID", + "Connection", + "value", + profile_key=prof_found, + callback=d.callback, + errback=d.errback, + ) d.addCallback(self.render_gotJID, request, prof_found) return server.NOT_DONE_YET def render_gotJID(self, pub_jid_s, request, profile): pub_jid = JID(pub_jid_s) - request.extra_dict = {} # will be used for RSM and MAM + request.extra_dict = {} # will be used for RSM and MAM self.parseURLParams(request) if request.item_id: # FIXME: this part seems useless # we want a specific item # item_ids = [request.item_id] # max_items = 1 - max_items = C.NO_LIMIT # FIXME + max_items = 
C.NO_LIMIT # FIXME else: # max_items = int(request.extra_dict['rsm_max']) # FIXME max_items = C.NO_LIMIT @@ -183,16 +210,35 @@ if request.atom: request.extra_dict.update(request.mam_extra) - self.getAtom(pub_jid, max_items, request.extra_dict, request.extra_comments_dict, request, profile) + self.getAtom( + pub_jid, + max_items, + request.extra_dict, + request.extra_comments_dict, + request, + profile, + ) elif request.item_id: # we can't merge mam_extra now because we'll use item_ids - self.getItemById(pub_jid, request.item_id, request.extra_dict, - request.extra_comments_dict, request, profile) + self.getItemById( + pub_jid, + request.item_id, + request.extra_dict, + request.extra_comments_dict, + request, + profile, + ) else: request.extra_dict.update(request.mam_extra) - self.getItems(pub_jid, max_items, request.extra_dict, - request.extra_comments_dict, request, profile) + self.getItems( + pub_jid, + max_items, + request.extra_dict, + request.extra_comments_dict, + request, + profile, + ) ## URL parsing @@ -202,7 +248,7 @@ @param request: HTTP request """ if len(request.postpath) > 1: - if request.postpath[1] == 'atom.xml': # return the atom feed + if request.postpath[1] == "atom.xml": # return the atom feed request.atom = True request.item_id = None else: @@ -214,7 +260,9 @@ self.parseURLParamsRSM(request) # XXX: request.display_single is True when only one blog post is visible - request.display_single = (request.item_id is not None) or int(request.extra_dict['rsm_max']) == 1 + request.display_single = (request.item_id is not None) or int( + request.extra_dict["rsm_max"] + ) == 1 self.parseURLParamsCommentsRSM(request) self.parseURLParamsMAM(request) @@ -227,21 +275,25 @@ if request.item_id: # XXX: item_id and RSM are not compatible return try: - rsm_max = int(request.args['max'][0]) + rsm_max = int(request.args["max"][0]) if rsm_max > C.STATIC_RSM_MAX_LIMIT: log.warning(u"Request with rsm_max over limit ({})".format(rsm_max)) rsm_max = C.STATIC_RSM_MAX_LIMIT 
- request.extra_dict['rsm_max'] = unicode(rsm_max) + request.extra_dict["rsm_max"] = unicode(rsm_max) except (ValueError, KeyError): - request.extra_dict['rsm_max'] = unicode(C.STATIC_RSM_MAX_DEFAULT) + request.extra_dict["rsm_max"] = unicode(C.STATIC_RSM_MAX_DEFAULT) try: - request.extra_dict['rsm_index'] = request.args['index'][0] + request.extra_dict["rsm_index"] = request.args["index"][0] except (ValueError, KeyError): try: - request.extra_dict['rsm_before'] = request.args['before'][0].decode('utf-8') + request.extra_dict["rsm_before"] = request.args["before"][0].decode( + "utf-8" + ) except KeyError: try: - request.extra_dict['rsm_after'] = request.args['after'][0].decode('utf-8') + request.extra_dict["rsm_after"] = request.args["after"][0].decode( + "utf-8" + ) except KeyError: pass @@ -254,15 +306,17 @@ request.extra_comments_dict = {} if request.display_single: try: - rsm_max = int(request.args['comments_max'][0]) + rsm_max = int(request.args["comments_max"][0]) if rsm_max > C.STATIC_RSM_MAX_LIMIT: log.warning(u"Request with rsm_max over limit ({})".format(rsm_max)) rsm_max = C.STATIC_RSM_MAX_LIMIT - request.extra_comments_dict['rsm_max'] = unicode(rsm_max) + request.extra_comments_dict["rsm_max"] = unicode(rsm_max) except (ValueError, KeyError): - request.extra_comments_dict['rsm_max'] = unicode(C.STATIC_RSM_MAX_COMMENTS_DEFAULT) + request.extra_comments_dict["rsm_max"] = unicode( + C.STATIC_RSM_MAX_COMMENTS_DEFAULT + ) else: - request.extra_comments_dict['rsm_max'] = "0" + request.extra_comments_dict["rsm_max"] = "0" def parseURLParamsMAM(self, request): """Parse MAM request data from the URL parameters for main items @@ -276,13 +330,17 @@ # for navigation links. 
request.mam_extra = {} try: - request.mam_extra['mam_filter_{}'.format(C.MAM_FILTER_CATEGORY)] = request.args['tag'][0].decode('utf-8') + request.mam_extra[ + "mam_filter_{}".format(C.MAM_FILTER_CATEGORY) + ] = request.args["tag"][0].decode("utf-8") except KeyError: pass ## Items retrieval - def getItemById(self, pub_jid, item_id, extra_dict, extra_comments_dict, request, profile): + def getItemById( + self, pub_jid, item_id, extra_dict, extra_comments_dict, request, profile + ): """ @param pub_jid (jid.JID): publisher JID @@ -300,46 +358,76 @@ def gotMetadata(result): dummy, rsm_metadata = result try: - metadata['rsm_count'] = rsm_metadata['rsm_count'] + metadata["rsm_count"] = rsm_metadata["rsm_count"] except KeyError: pass try: - metadata['rsm_index'] = unicode(int(rsm_metadata['rsm_index'])-1) + metadata["rsm_index"] = unicode(int(rsm_metadata["rsm_index"]) - 1) except KeyError: pass - metadata['rsm_first'] = metadata['rsm_last'] = item["id"] + metadata["rsm_first"] = metadata["rsm_last"] = item["id"] def gotComments(comments): # at this point we can merge mam dict request.extra_dict.update(request.mam_extra) # build the items as self.getItems would do it (and as self.renderHTML expects them to be) - comments = [(item['comments_service'], item['comments_node'], "", comments[0], comments[1])] - self.renderHTML([(item, comments)], metadata, request, pub_jid, profile) + comments = [ + ( + item["comments_service"], + item["comments_node"], + "", + comments[0], + comments[1], + ) + ] + self.renderHTML( + [(item, comments)], metadata, request, pub_jid, profile + ) # get the comments # max_comments = int(extra_comments_dict['rsm_max']) # FIXME max_comments = C.NO_LIMIT # TODO: use max_comments only when RSM is not available - self.host.bridge.mbGet(item['comments_service'], item['comments_node'], max_comments, [], - extra_comments_dict, C.SERVICE_PROFILE, + self.host.bridge.mbGet( + item["comments_service"], + item["comments_node"], + max_comments, + [], + 
extra_comments_dict, + C.SERVICE_PROFILE, callback=gotComments, - errback=lambda failure: self.renderError(failure, request, pub_jid)) + errback=lambda failure: self.renderError(failure, request, pub_jid), + ) # XXX: retrieve RSM information related to the main item. We can't do it while # retrieving the item, because item_ids and rsm should not be used together. - self.host.bridge.mbGet(pub_jid.userhost(), '', 0, [], - {"rsm_max": "1", "rsm_after": item["id"]}, C.SERVICE_PROFILE, + self.host.bridge.mbGet( + pub_jid.userhost(), + "", + 0, + [], + {"rsm_max": "1", "rsm_after": item["id"]}, + C.SERVICE_PROFILE, callback=gotMetadata, - errback=lambda failure: self.renderError(failure, request, pub_jid)) + errback=lambda failure: self.renderError(failure, request, pub_jid), + ) # get the main item - self.host.bridge.mbGet(pub_jid.userhost(), '', 0, [item_id], - extra_dict, C.SERVICE_PROFILE, + self.host.bridge.mbGet( + pub_jid.userhost(), + "", + 0, + [item_id], + extra_dict, + C.SERVICE_PROFILE, callback=gotItems, - errback=lambda failure: self.renderError(failure, request, pub_jid)) + errback=lambda failure: self.renderError(failure, request, pub_jid), + ) - def getItems(self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile): + def getItems( + self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile + ): """ @param pub_jid (jid.JID): publisher JID @@ -349,6 +437,7 @@ @param request: HTTP request @param profile """ + def getResultCb(data, rt_session): remaining, results = data # we have requested one node only @@ -361,18 +450,30 @@ self.renderHTML(items, metadata, request, pub_jid, profile) def getResult(rt_session): - self.host.bridge.mbGetFromManyWithCommentsRTResult(rt_session, C.SERVICE_PROFILE, - callback=lambda data: getResultCb(data, rt_session), - errback=lambda failure: self.renderError(failure, request, pub_jid)) + self.host.bridge.mbGetFromManyWithCommentsRTResult( + rt_session, + C.SERVICE_PROFILE, + callback=lambda 
data: getResultCb(data, rt_session), + errback=lambda failure: self.renderError(failure, request, pub_jid), + ) # max_comments = int(extra_comments_dict['rsm_max']) # FIXME max_comments = 0 # TODO: use max_comments only when RSM is not available - self.host.bridge.mbGetFromManyWithComments(C.JID, [pub_jid.userhost()], max_items, - max_comments, extra_dict, extra_comments_dict, - C.SERVICE_PROFILE, callback=getResult) + self.host.bridge.mbGetFromManyWithComments( + C.JID, + [pub_jid.userhost()], + max_items, + max_comments, + extra_dict, + extra_comments_dict, + C.SERVICE_PROFILE, + callback=getResult, + ) - def getAtom(self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile): + def getAtom( + self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile + ): """ @param pub_jid (jid.JID): publisher JID @@ -382,79 +483,93 @@ @param request: HTTP request @param profile """ + def gotItems(data): # Generate a clean atom feed with uri linking to this blog # from microblog data - items, metadata= data - feed_elt = domish.Element((NS_ATOM, u'feed')) + items, metadata = data + feed_elt = domish.Element((NS_ATOM, u"feed")) title = _(u"{user}'s blog").format(user=profile) - feed_elt.addElement(u'title', content=title) + feed_elt.addElement(u"title", content=title) - base_blog_url = self.host.getExtBaseURL(request, - u'blog/{user}'.format(user=profile)) + base_blog_url = self.host.getExtBaseURL( + request, u"blog/{user}".format(user=profile) + ) # atom link - link_feed_elt = feed_elt.addElement('link') - link_feed_elt['href'] = u'{base}/atom.xml'.format(base=base_blog_url) - link_feed_elt['type'] = u'application/atom+xml' - link_feed_elt['rel'] = u'self' + link_feed_elt = feed_elt.addElement("link") + link_feed_elt["href"] = u"{base}/atom.xml".format(base=base_blog_url) + link_feed_elt["type"] = u"application/atom+xml" + link_feed_elt["rel"] = u"self" # blog link - link_blog_elt = feed_elt.addElement('link') - link_blog_elt['rel'] = 
u'alternate' - link_blog_elt['type'] = u'text/html' - link_blog_elt['href'] = base_blog_url + link_blog_elt = feed_elt.addElement("link") + link_blog_elt["rel"] = u"alternate" + link_blog_elt["type"] = u"text/html" + link_blog_elt["href"] = base_blog_url # blog link XMPP uri - blog_xmpp_uri = metadata['uri'] - link_blog_elt = feed_elt.addElement('link') - link_blog_elt['rel'] = u'alternate' - link_blog_elt['type'] = u'application/atom+xml' - link_blog_elt['href'] = blog_xmpp_uri + blog_xmpp_uri = metadata["uri"] + link_blog_elt = feed_elt.addElement("link") + link_blog_elt["rel"] = u"alternate" + link_blog_elt["type"] = u"application/atom+xml" + link_blog_elt["href"] = blog_xmpp_uri - feed_elt.addElement('id', content=_quote(blog_xmpp_uri)) - updated_unix = max([float(item['updated']) for item in items]) + feed_elt.addElement("id", content=_quote(blog_xmpp_uri)) + updated_unix = max([float(item["updated"]) for item in items]) updated_dt = datetime.fromtimestamp(updated_unix) - feed_elt.addElement(u'updated', content=u'{}Z'.format(updated_dt.isoformat("T"))) + feed_elt.addElement( + u"updated", content=u"{}Z".format(updated_dt.isoformat("T")) + ) for item in items: - entry_elt = feed_elt.addElement(u'entry') + entry_elt = feed_elt.addElement(u"entry") # Title try: - title = item['title'] + title = item["title"] except KeyError: # for microblog (without title), we use an abstract of content as title - title = u'{}…'.format(u' '.join(item['content'][:70].split())) - entry_elt.addElement(u'title', content=title) + title = u"{}…".format(u" ".join(item["content"][:70].split())) + entry_elt.addElement(u"title", content=title) # HTTP link - http_link_elt = entry_elt.addElement(u'link') - http_link_elt['rel'] = u'alternate' - http_link_elt['type'] = u'text/html' - http_link_elt['href'] = u'{base}/{quoted_id}'.format(base=base_blog_url, quoted_id=_quote(item['id'])) + http_link_elt = entry_elt.addElement(u"link") + http_link_elt["rel"] = u"alternate" + http_link_elt["type"] 
= u"text/html" + http_link_elt["href"] = u"{base}/{quoted_id}".format( + base=base_blog_url, quoted_id=_quote(item["id"]) + ) # XMPP link - xmpp_link_elt = entry_elt.addElement(u'link') - xmpp_link_elt['rel'] = u'alternate' - xmpp_link_elt['type'] = u'application/atom+xml' - xmpp_link_elt['href'] = u'{blog_uri};item={item_id}'.format(blog_uri=blog_xmpp_uri, item_id=item['id']) + xmpp_link_elt = entry_elt.addElement(u"link") + xmpp_link_elt["rel"] = u"alternate" + xmpp_link_elt["type"] = u"application/atom+xml" + xmpp_link_elt["href"] = u"{blog_uri};item={item_id}".format( + blog_uri=blog_xmpp_uri, item_id=item["id"] + ) # date metadata - entry_elt.addElement(u'id', content=item['atom_id']) - updated = datetime.fromtimestamp(float(item['updated'])) - entry_elt.addElement(u'updated', content=u'{}Z'.format(updated.isoformat("T"))) - published = datetime.fromtimestamp(float(item['published'])) - entry_elt.addElement(u'published', content=u'{}Z'.format(published.isoformat("T"))) + entry_elt.addElement(u"id", content=item["atom_id"]) + updated = datetime.fromtimestamp(float(item["updated"])) + entry_elt.addElement( + u"updated", content=u"{}Z".format(updated.isoformat("T")) + ) + published = datetime.fromtimestamp(float(item["published"])) + entry_elt.addElement( + u"published", content=u"{}Z".format(published.isoformat("T")) + ) # author metadata - author_elt = entry_elt.addElement(u'author') - author_elt.addElement('name', content=item.get('author', profile)) + author_elt = entry_elt.addElement(u"author") + author_elt.addElement("name", content=item.get("author", profile)) try: - author_elt.addElement('uri', content=u'xmpp:{}'.format(item['author_jid'])) + author_elt.addElement( + "uri", content=u"xmpp:{}".format(item["author_jid"]) + ) except KeyError: pass try: - author_elt.addElement('email', content=item['author_email']) + author_elt.addElement("email", content=item["author_email"]) except KeyError: pass @@ -465,19 +580,31 @@ # content try: - content_xhtml = 
item['content_xhtml'] + content_xhtml = item["content_xhtml"] except KeyError: - content_elt = entry_elt.addElement('content', content='content') - content_elt['type'] = 'text' + content_elt = entry_elt.addElement("content", content="content") + content_elt["type"] = "text" else: - content_elt = entry_elt.addElement('content') - content_elt['type'] = 'xhtml' - content_elt.addChild(xml_tools.ElementParser()(content_xhtml, namespace=C.NS_XHTML)) + content_elt = entry_elt.addElement("content") + content_elt["type"] = "xhtml" + content_elt.addChild( + xml_tools.ElementParser()(content_xhtml, namespace=C.NS_XHTML) + ) - atom_feed = u'<?xml version="1.0" encoding="utf-8"?>\n{}'.format(feed_elt.toXml()) + atom_feed = u'<?xml version="1.0" encoding="utf-8"?>\n{}'.format( + feed_elt.toXml() + ) self.renderAtomFeed(atom_feed, request), - self.host.bridge.mbGet(pub_jid.userhost(), '', max_items, [], extra_dict, C.SERVICE_PROFILE, callback=gotItems) + self.host.bridge.mbGet( + pub_jid.userhost(), + "", + max_items, + [], + extra_dict, + C.SERVICE_PROFILE, + callback=gotItems, + ) ## rendering @@ -486,8 +613,10 @@ def _getImageParams(self, options, key, default, alt): """regexp from http://answers.oreilly.com/topic/280-how-to-validate-urls-with-regular-expressions/""" - url = options[key] if key in options else '' - regexp = r"^(https?|ftp)://[a-z0-9-]+(\.[a-z0-9-]+)+(/[\w-]+)*/[\w-]+\.(gif|png|jpg)$" + url = options[key] if key in options else "" + regexp = ( + r"^(https?|ftp)://[a-z0-9-]+(\.[a-z0-9-]+)+(/[\w-]+)*/[\w-]+\.(gif|png|jpg)$" + ) if re.match(regexp, url): url = url else: @@ -496,7 +625,11 @@ def renderError(self, failure, request, pub_jid): request.setResponseCode(500) - request.write(self.useTemplate(request, "static_blog_error", {'message': "Can't access requested data"})) + request.write( + self.useTemplate( + request, "static_blog_error", {"message": "Can't access requested data"} + ) + ) request.finish() def renderHTML(self, items, metadata, request, pub_jid, 
profile): @@ -512,19 +645,29 @@ options = {} d = self.getAvatarURL(pub_jid, request) - d.addCallback(self._updateDict, options, 'avatar') + d.addCallback(self._updateDict, options, "avatar") d.addErrback(self.renderError, request, pub_jid) d_list.append(d) for param_name in PARAMS_TO_GET: d = defer.Deferred() - self.host.bridge.asyncGetParamA(param_name, C.STATIC_BLOG_KEY, 'value', C.SERVER_SECURITY_LIMIT, profile, callback=d.callback, errback=d.errback) + self.host.bridge.asyncGetParamA( + param_name, + C.STATIC_BLOG_KEY, + "value", + C.SERVER_SECURITY_LIMIT, + profile, + callback=d.callback, + errback=d.errback, + ) d.addCallback(self._updateDict, options, param_name) d.addErrback(self.renderError, request, pub_jid) d_list.append(d) dlist_d = defer.DeferredList(d_list) - dlist_d.addCallback(lambda dummy: self._renderHTML(items, metadata, options, request, pub_jid)) + dlist_d.addCallback( + lambda dummy: self._renderHTML(items, metadata, options, request, pub_jid) + ) def _renderHTML(self, items, metadata, options, request, pub_jid): """Actually render the static blog. 
@@ -548,55 +691,59 @@ if not isinstance(options, dict): options = {} user = sanitizeHtml(pub_jid.user) - base_url = os.path.join('/blog/',user) + base_url = os.path.join("/blog/", user) def getOption(key): - return sanitizeHtml(options[key]) if key in options else '' + return sanitizeHtml(options[key]) if key in options else "" - avatar = os.path.normpath('/{}'.format(getOption('avatar'))) + avatar = os.path.normpath("/{}".format(getOption("avatar"))) title = getOption(C.STATIC_BLOG_PARAM_TITLE) or user - query_data = _urlencode(getDefaultQueryData(request)).decode('utf-8') + query_data = _urlencode(getDefaultQueryData(request)).decode("utf-8") - xmpp_uri = metadata['uri'] + xmpp_uri = metadata["uri"] if len(items) == 1: # FIXME: that's really not a good way to get item id # this must be changed after static blog refactorisation - item_id = items[0][0]['id'] - xmpp_uri+=u";item={}".format(_quote(item_id)) + item_id = items[0][0]["id"] + xmpp_uri += u";item={}".format(_quote(item_id)) - data = {'url_base': base_url, - 'xmpp_uri': xmpp_uri, - 'url_query': u'?{}'.format(query_data) if query_data else '' , - 'keywords': getOption(C.STATIC_BLOG_PARAM_KEYWORDS), - 'description': getOption(C.STATIC_BLOG_PARAM_DESCRIPTION), - 'title': title, - 'favicon': avatar, - 'banner_img': self._getImageParams(options, C.STATIC_BLOG_PARAM_BANNER, avatar, title) - } + data = { + "url_base": base_url, + "xmpp_uri": xmpp_uri, + "url_query": u"?{}".format(query_data) if query_data else "", + "keywords": getOption(C.STATIC_BLOG_PARAM_KEYWORDS), + "description": getOption(C.STATIC_BLOG_PARAM_DESCRIPTION), + "title": title, + "favicon": avatar, + "banner_img": self._getImageParams( + options, C.STATIC_BLOG_PARAM_BANNER, avatar, title + ), + } - data['navlinks'] = NavigationLinks(request, items, metadata, base_url) - data['messages'] = [] + data["navlinks"] = NavigationLinks(request, items, metadata, base_url) + data["messages"] = [] for item in items: item, comments_list = item comments, 
comments_count = [], 0 for node_comments in comments_list: comments.extend(node_comments[3]) try: - comments_count += int(node_comments[4]['rsm_count']) + comments_count += int(node_comments[4]["rsm_count"]) except KeyError: pass - data['messages'].append(BlogMessage(request, base_url, item, comments, comments_count)) + data["messages"].append( + BlogMessage(request, base_url, item, comments, comments_count) + ) - request.write(self.useTemplate(request, 'static_blog', data)) + request.write(self.useTemplate(request, "static_blog", data)) request.finish() def renderAtomFeed(self, feed, request): - request.write(feed.encode('utf-8')) + request.write(feed.encode("utf-8")) request.finish() class NavigationLinks(object): - def __init__(self, request, items, metadata, base_url): """Build the navigation links. @@ -613,35 +760,35 @@ # which links we need to display if request.display_single: - links = ('later_message', 'older_message') + links = ("later_message", "older_message") # key must exist when using the template - self.later_messages = self.older_messages = '' + self.later_messages = self.older_messages = "" else: - links = ('later_messages', 'older_messages') - self.later_message = self.older_message = '' + links = ("later_messages", "older_messages") + self.later_message = self.older_message = "" # now we set the links according to RSM for key in links: query_data = default_query_data.copy() - if key.startswith('later_message'): + if key.startswith("later_message"): try: - index = int(metadata['rsm_index']) + index = int(metadata["rsm_index"]) except (KeyError, ValueError): pass else: if index == 0: # we don't show this link on first page - setattr(self, key, '') + setattr(self, key, "") continue try: - query_data['before'] = metadata['rsm_first'].encode('utf-8') + query_data["before"] = metadata["rsm_first"].encode("utf-8") except KeyError: pass else: try: - index = int(metadata['rsm_index']) - count = int(metadata.get('rsm_count')) + index = 
int(metadata["rsm_index"]) + count = int(metadata.get("rsm_count")) except (KeyError, ValueError): # XXX: if we don't have index or count, we can't know if we # are on the last page or not @@ -650,29 +797,27 @@ # if we have index, we don't show the after link # on the last page if index + len(items) >= count: - setattr(self, key, '') + setattr(self, key, "") continue try: - query_data['after'] = metadata['rsm_last'].encode('utf-8') + query_data["after"] = metadata["rsm_last"].encode("utf-8") except KeyError: pass if request.display_single: - query_data['max'] = 1 + query_data["max"] = 1 link = "{}?{}".format(base_url, _urlencode(query_data)) - setattr(self, key, BlogLink(link, key, key.replace('_', ' '))) + setattr(self, key, BlogLink(link, key, key.replace("_", " "))) class BlogImage(object): - def __init__(self, url_, alt): self.url = url_ self.alt = alt class BlogLink(object): - def __init__(self, url_, style, text): self.url = url_ self.style = style @@ -680,7 +825,6 @@ class BlogMessage(object): - def __init__(self, request, base_url, entry, comments=None, comments_count=0): """ @@ -692,47 +836,54 @@ """ if comments is None: comments = [] - timestamp = float(entry.get('published', 0)) + timestamp = float(entry.get("published", 0)) # FIXME: for now we assume that the comments' depth is only 1 - is_comment = not entry.get('comments', False) + is_comment = not entry.get("comments", False) self.date = datetime.fromtimestamp(timestamp) self.type = "comment" if is_comment else "main_item" - self.style = 'mblog_comment' if is_comment else '' - self.content = self.getText(entry, 'content') + self.style = "mblog_comment" if is_comment else "" + self.content = self.getText(entry, "content") if is_comment: - self.author = (_(u"from {}").format(entry['author'])) + self.author = _(u"from {}").format(entry["author"]) else: - self.author = ' ' - self.url = "{}/{}".format(base_url, _quote(entry['id'])) + self.author = " " + self.url = "{}/{}".format(base_url, 
_quote(entry["id"])) query_data = getDefaultQueryData(request) if query_data: - self.url += '?{}'.format(_urlencode(query_data)) - self.title = self.getText(entry, 'title') - self.tags = [sanitizeHtml(tag) for tag in data_format.dict2iter('tag', entry)] + self.url += "?{}".format(_urlencode(query_data)) + self.title = self.getText(entry, "title") + self.tags = [sanitizeHtml(tag) for tag in data_format.dict2iter("tag", entry)] - count_text = lambda count: D_(u'comments') if count > 1 else D_(u'comment') + count_text = lambda count: D_(u"comments") if count > 1 else D_(u"comment") - self.comments_text = u"{} {}".format(comments_count, count_text(comments_count)) + self.comments_text = u"{} {}".format( + comments_count, count_text(comments_count) + ) delta = comments_count - len(comments) if request.display_single and delta > 0: - prev_url = "{}?{}".format(self.url, _urlencode({'comments_max': comments_count})) + prev_url = "{}?{}".format( + self.url, _urlencode({"comments_max": comments_count}) + ) prev_text = D_(u"show {count} previous {comments}").format( - count = delta, comments = count_text(delta)) + count=delta, comments=count_text(delta) + ) self.all_comments_link = BlogLink(prev_url, "comments_link", prev_text) if comments: - self.comments = [BlogMessage(request, base_url, comment) for comment in comments] + self.comments = [ + BlogMessage(request, base_url, comment) for comment in comments + ] def getText(self, entry, key): try: - xhtml = entry['{}_xhtml'.format(key)] + xhtml = entry["{}_xhtml".format(key)] except KeyError: try: - processor = addURLToText if key.startswith('content') else sanitizeHtml + processor = addURLToText if key.startswith("content") else sanitizeHtml return convertNewLinesToXHTML(processor(entry[key])) except KeyError: return None