comparison libervia/server/blog.py @ 1216:b2d067339de3

Python 3 port:
/!\ Python 3.6+ is now needed to use Libervia
/!\ instability may occur and some features may no longer work; this will improve with time
/!\ the TxJSONRPC dependency has been removed
The same procedure as in the backend has been applied (check backend commit ab2696e34d29 logs for details).
Removed now-deprecated code (the Pyjamas-compiled browser part, the legacy blog, and JSON-RPC related code).
Adapted the code to work without the `html` and `themes` dirs.
author Goffi <goffi@goffi.org>
date Tue, 13 Aug 2019 19:12:31 +0200
parents 1276452c0d32
children 987595a254b0
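The port applies the usual Python 2 to Python 3 substitutions throughout the file, as the hunks below show: the urllib module is replaced by urllib.parse (and its siblings), unicode becomes str, dict.iteritems() becomes dict.items(), and the u"..." literal prefixes are dropped. A minimal before/after sketch of these substitutions (illustrative values only, not code from the changeset):

import urllib.parse

# urllib.quote(value.encode("utf-8"), "")  ->  urllib.parse.quote(...)
quoted = urllib.parse.quote("café".encode("utf-8"), "")

# isinstance(value, unicode)               ->  isinstance(value, str)
assert isinstance("café", str)

# query.iteritems()                        ->  query.items()
query = {"max": 10, "index": 0}
encoded = "&amp;".join(
    "{}={}".format(urllib.parse.quote_plus(str(k)), urllib.parse.quote_plus(str(v)))
    for k, v in query.items()
)

# u"text" literals                         ->  plain "text" literals
log_msg = "Request with rsm_max over limit ({})".format(10)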
--- libervia/server/blog.py @ 1215:f14ab8a25e8b
+++ libervia/server/blog.py @ 1216:b2d067339de3
@@ -34,11 +34,11 @@
 from jinja2 import Environment, PackageLoader
 from datetime import datetime
 import re
 import os
 import sys
-import urllib
+import urllib.request, urllib.parse, urllib.error

 from libervia.server.html_tools import sanitizeHtml, convertNewLinesToXHTML
 from libervia.server.constants import Const as C

 NS_ATOM = "http://www.w3.org/2005/Atom"
@@ -76,29 +76,29 @@
     """Quote a value for use in url

     @param value(unicode): value to quote
     @return (str): quoted value
     """
-    return urllib.quote(value.encode("utf-8"), "")
+    return urllib.parse.quote(value.encode("utf-8"), "")


 def _unquote(quoted_value):
     """Unquote a value coming from url

     @param unquote_value(str): value to unquote
     @return (unicode): unquoted value
     """
-    assert not isinstance(quoted_value, unicode)
-    return urllib.unquote(quoted_value).decode("utf-8")
+    assert not isinstance(quoted_value, str)
+    return urllib.parse.unquote(quoted_value).decode("utf-8")


 def _urlencode(query):
     """Same as urllib.urlencode, but use '&amp;' instead of '&'"""
     return "&amp;".join(
         [
-            "{}={}".format(urllib.quote_plus(str(k)), urllib.quote_plus(str(v)))
-            for k, v in query.iteritems()
+            "{}={}".format(urllib.parse.quote_plus(str(k)), urllib.parse.quote_plus(str(v)))
+            for k, v in query.items()
         ]
     )


 class TemplateProcessor(object):
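For reference, urllib.parse in Python 3 works on str directly: quote() percent-encodes text as UTF-8 by default, and unquote() already returns str, so explicit encode()/decode() round-trips are not required. A minimal sketch of equivalent helpers under that assumption (illustrative only, not the project's code):

import urllib.parse

def quote_for_url(value):
    """Percent-encode a text value for use in a URL path segment."""
    # quote() accepts str on Python 3 and encodes it as UTF-8 by default
    return urllib.parse.quote(value, safe="")

def unquote_from_url(quoted_value):
    """Decode a percent-encoded value coming from a URL."""
    # unquote() already returns str on Python 3; no decode() step is needed
    return urllib.parse.unquote(quoted_value)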
@@ -275,15 +275,15 @@
         if request.item_id:  # XXX: item_id and RSM are not compatible
             return
         try:
             rsm_max = int(request.args["max"][0])
             if rsm_max > C.STATIC_RSM_MAX_LIMIT:
-                log.warning(u"Request with rsm_max over limit ({})".format(rsm_max))
+                log.warning("Request with rsm_max over limit ({})".format(rsm_max))
                 rsm_max = C.STATIC_RSM_MAX_LIMIT
-            request.extra_dict["rsm_max"] = unicode(rsm_max)
+            request.extra_dict["rsm_max"] = str(rsm_max)
         except (ValueError, KeyError):
-            request.extra_dict["rsm_max"] = unicode(C.STATIC_RSM_MAX_DEFAULT)
+            request.extra_dict["rsm_max"] = str(C.STATIC_RSM_MAX_DEFAULT)
         try:
             request.extra_dict["rsm_index"] = request.args["index"][0]
         except (ValueError, KeyError):
             try:
                 request.extra_dict["rsm_before"] = request.args["before"][0].decode(
@@ -306,15 +306,15 @@
         request.extra_comments_dict = {}
         if request.display_single:
             try:
                 rsm_max = int(request.args["comments_max"][0])
                 if rsm_max > C.STATIC_RSM_MAX_LIMIT:
-                    log.warning(u"Request with rsm_max over limit ({})".format(rsm_max))
+                    log.warning("Request with rsm_max over limit ({})".format(rsm_max))
                     rsm_max = C.STATIC_RSM_MAX_LIMIT
-                request.extra_comments_dict["rsm_max"] = unicode(rsm_max)
+                request.extra_comments_dict["rsm_max"] = str(rsm_max)
             except (ValueError, KeyError):
-                request.extra_comments_dict["rsm_max"] = unicode(
+                request.extra_comments_dict["rsm_max"] = str(
                     C.STATIC_RSM_MAX_COMMENTS_DEFAULT
                 )
         else:
             request.extra_comments_dict["rsm_max"] = "0"

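Both hunks above clamp the RSM "max" value read from the request to C.STATIC_RSM_MAX_LIMIT before storing it as a string. A standalone sketch of the same clamping pattern, assuming a Twisted request whose request.args maps bytes keys to lists of bytes values under Python 3 (the helper name and defaults are illustrative, not part of the changeset):

def get_rsm_max(request, default=10, limit=100):
    """Return the requested RSM page size as a string, clamped to a limit."""
    try:
        # twisted.web exposes query parameters as bytes, hence b"max" and decode()
        raw = request.args[b"max"][0].decode("utf-8")
        rsm_max = int(raw)
    except (KeyError, IndexError, ValueError, UnicodeDecodeError):
        return str(default)
    return str(min(rsm_max, limit))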
@@ -361,11 +361,11 @@
         try:
             metadata["rsm_count"] = rsm_metadata["rsm_count"]
         except KeyError:
             pass
         try:
-            metadata["rsm_index"] = unicode(int(rsm_metadata["rsm_index"]) - 1)
+            metadata["rsm_index"] = str(int(rsm_metadata["rsm_index"]) - 1)
         except KeyError:
             pass

         metadata["rsm_first"] = metadata["rsm_last"] = item["id"]

@@ -489,98 +489,98 @@
         def gotItems(data):
             # Generate a clean atom feed with uri linking to this blog
             # from microblog data
             items, metadata = data
             items = [data_format.deserialise(i) for i in items]
-            feed_elt = domish.Element((NS_ATOM, u"feed"))
-            title = _(u"{user}'s blog").format(user=profile)
-            feed_elt.addElement(u"title", content=title)
+            feed_elt = domish.Element((NS_ATOM, "feed"))
+            title = _("{user}'s blog").format(user=profile)
+            feed_elt.addElement("title", content=title)

             base_blog_url = self.host.getExtBaseURL(
-                request, u"blog/{user}".format(user=profile)
+                request, "blog/{user}".format(user=profile)
             )

             # atom link
             link_feed_elt = feed_elt.addElement("link")
-            link_feed_elt["href"] = u"{base}/atom.xml".format(base=base_blog_url)
-            link_feed_elt["type"] = u"application/atom+xml"
-            link_feed_elt["rel"] = u"self"
+            link_feed_elt["href"] = "{base}/atom.xml".format(base=base_blog_url)
+            link_feed_elt["type"] = "application/atom+xml"
+            link_feed_elt["rel"] = "self"

             # blog link
             link_blog_elt = feed_elt.addElement("link")
-            link_blog_elt["rel"] = u"alternate"
-            link_blog_elt["type"] = u"text/html"
+            link_blog_elt["rel"] = "alternate"
+            link_blog_elt["type"] = "text/html"
             link_blog_elt["href"] = base_blog_url

             # blog link XMPP uri
             blog_xmpp_uri = metadata["uri"]
             link_blog_elt = feed_elt.addElement("link")
-            link_blog_elt["rel"] = u"alternate"
-            link_blog_elt["type"] = u"application/atom+xml"
+            link_blog_elt["rel"] = "alternate"
+            link_blog_elt["type"] = "application/atom+xml"
             link_blog_elt["href"] = blog_xmpp_uri

             feed_elt.addElement("id", content=_quote(blog_xmpp_uri))
             updated_unix = max([float(item["updated"]) for item in items])
             updated_dt = datetime.fromtimestamp(updated_unix)
             feed_elt.addElement(
-                u"updated", content=u"{}Z".format(updated_dt.isoformat("T"))
+                "updated", content="{}Z".format(updated_dt.isoformat("T"))
             )

             for item in items:
-                entry_elt = feed_elt.addElement(u"entry")
+                entry_elt = feed_elt.addElement("entry")

                 # Title
                 try:
                     title = item["title"]
                 except KeyError:
                     # for microblog (without title), we use an abstract of content as title
-                    title = u"{}…".format(u" ".join(item["content"][:70].split()))
-                entry_elt.addElement(u"title", content=title)
+                    title = "{}…".format(" ".join(item["content"][:70].split()))
+                entry_elt.addElement("title", content=title)

                 # HTTP link
-                http_link_elt = entry_elt.addElement(u"link")
-                http_link_elt["rel"] = u"alternate"
-                http_link_elt["type"] = u"text/html"
-                http_link_elt["href"] = u"{base}/{quoted_id}".format(
+                http_link_elt = entry_elt.addElement("link")
+                http_link_elt["rel"] = "alternate"
+                http_link_elt["type"] = "text/html"
+                http_link_elt["href"] = "{base}/{quoted_id}".format(
                     base=base_blog_url, quoted_id=_quote(item["id"])
                 )
                 # XMPP link
-                xmpp_link_elt = entry_elt.addElement(u"link")
-                xmpp_link_elt["rel"] = u"alternate"
-                xmpp_link_elt["type"] = u"application/atom+xml"
-                xmpp_link_elt["href"] = u"{blog_uri};item={item_id}".format(
+                xmpp_link_elt = entry_elt.addElement("link")
+                xmpp_link_elt["rel"] = "alternate"
+                xmpp_link_elt["type"] = "application/atom+xml"
+                xmpp_link_elt["href"] = "{blog_uri};item={item_id}".format(
                     blog_uri=blog_xmpp_uri, item_id=item["id"]
                 )

                 # date metadata
-                entry_elt.addElement(u"id", content=item["atom_id"])
+                entry_elt.addElement("id", content=item["atom_id"])
                 updated = datetime.fromtimestamp(float(item["updated"]))
                 entry_elt.addElement(
-                    u"updated", content=u"{}Z".format(updated.isoformat("T"))
+                    "updated", content="{}Z".format(updated.isoformat("T"))
                 )
                 published = datetime.fromtimestamp(float(item["published"]))
                 entry_elt.addElement(
-                    u"published", content=u"{}Z".format(published.isoformat("T"))
+                    "published", content="{}Z".format(published.isoformat("T"))
                 )

                 # author metadata
-                author_elt = entry_elt.addElement(u"author")
+                author_elt = entry_elt.addElement("author")
                 author_elt.addElement("name", content=item.get("author", profile))
                 try:
                     author_elt.addElement(
-                        "uri", content=u"xmpp:{}".format(item["author_jid"])
+                        "uri", content="xmpp:{}".format(item["author_jid"])
                     )
                 except KeyError:
                     pass
                 try:
                     author_elt.addElement("email", content=item["author_email"])
                 except KeyError:
                     pass

                 # categories
                 for tag in item.get('tags', []):
-                    category_elt = entry_elt.addElement(u"category")
+                    category_elt = entry_elt.addElement("category")
                     category_elt["term"] = tag

                 # content
                 try:
                     content_xhtml = item["content_xhtml"]
@@ -592,11 +592,11 @@
                 content_elt["type"] = "xhtml"
                 content_elt.addChild(
                     xml_tools.ElementParser()(content_xhtml, namespace=C.NS_XHTML)
                 )

-            atom_feed = u'<?xml version="1.0" encoding="utf-8"?>\n{}'.format(
+            atom_feed = '<?xml version="1.0" encoding="utf-8"?>\n{}'.format(
                 feed_elt.toXml()
             )
             self.renderAtomFeed(atom_feed, request),

         self.host.bridge.mbGet(
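The gotItems callback above assembles the Atom document with domish from twisted.words.xish. A condensed, standalone sketch of the same element structure with placeholder values (the URL and ids below are made up for illustration):

from twisted.words.xish import domish

NS_ATOM = "http://www.w3.org/2005/Atom"

feed_elt = domish.Element((NS_ATOM, "feed"))
feed_elt.addElement("title", content="example blog")

link_elt = feed_elt.addElement("link")          # self link of the feed
link_elt["rel"] = "self"
link_elt["type"] = "application/atom+xml"
link_elt["href"] = "https://example.org/blog/someone/atom.xml"

entry_elt = feed_elt.addElement("entry")        # one entry per blog item
entry_elt.addElement("title", content="first post")
entry_elt.addElement("id", content="urn:example:first-post")
entry_elt.addElement("updated", content="2019-08-13T00:00:00Z")

# serialise with the XML prologue prepended, as done above
atom_feed = '<?xml version="1.0" encoding="utf-8"?>\n{}'.format(feed_elt.toXml())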
@@ -706,16 +706,16 @@
         xmpp_uri = metadata["uri"]
         if len(items) == 1:
             # FIXME: that's really not a good way to get item id
             # this must be changed after static blog refactorisation
             item_id = items[0][0]["id"]
-            xmpp_uri += u";item={}".format(_quote(item_id))
+            xmpp_uri += ";item={}".format(_quote(item_id))

         data = {
             "url_base": base_url,
             "xmpp_uri": xmpp_uri,
-            "url_query": u"?{}".format(query_data) if query_data else "",
+            "url_query": "?{}".format(query_data) if query_data else "",
             "keywords": getOption(C.STATIC_BLOG_PARAM_KEYWORDS),
             "description": getOption(C.STATIC_BLOG_PARAM_DESCRIPTION),
             "title": title,
             "favicon": avatar,
             "banner_img": self._getImageParams(
@@ -848,32 +848,32 @@
         self.type = "comment" if is_comment else "main_item"
         self.style = "mblog_comment" if is_comment else ""
         self.content = self.getText(entry, "content")

         if is_comment:
-            self.author = _(u"from {}").format(entry["author"])
+            self.author = _("from {}").format(entry["author"])
         else:
             self.author = "&nbsp;"
         self.url = "{}/{}".format(base_url, _quote(entry["id"]))
         query_data = getDefaultQueryData(request)
         if query_data:
             self.url += "?{}".format(_urlencode(query_data))
         self.title = self.getText(entry, "title")
         self.tags = [sanitizeHtml(tag) for tag in entry.get('tags', [])]

-        count_text = lambda count: D_(u"comments") if count > 1 else D_(u"comment")
+        count_text = lambda count: D_("comments") if count > 1 else D_("comment")

-        self.comments_text = u"{} {}".format(
+        self.comments_text = "{} {}".format(
             comments_count, count_text(comments_count)
         )

         delta = comments_count - len(comments)
         if request.display_single and delta > 0:
             prev_url = "{}?{}".format(
                 self.url, _urlencode({"comments_max": comments_count})
             )
-            prev_text = D_(u"show {count} previous {comments}").format(
+            prev_text = D_("show {count} previous {comments}").format(
                 count=delta, comments=count_text(delta)
             )
             self.all_comments_link = BlogLink(prev_url, "comments_link", prev_text)

         if comments: