comparison libervia/server/blog.py @ 1124:28e3eb3bb217

files reorganisation and installation rework:
- files have been reorganised to follow other SàT projects and the usual Python layout (no more "/src" directory)
- a VERSION file is now used, as in other SàT projects
- replaced the overcomplicated setup.py with a saner one. The Pyjamas part is not compiled by setup.py anymore, it must be done separately
- removed the check for an empty data_dir
- installation tested and working in a virtualenv
- the libervia launching script is now in bin/libervia
author Goffi <goffi@goffi.org>
date Sat, 25 Aug 2018 17:59:48 +0200
parents src/server/blog.py@cdd389ef97bc
children 2af117bfe6cc
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Libervia: a Salut à Toi frontend
5 # Copyright (C) 2011-2018 Jérôme Poisson <goffi@goffi.org>
6 # Copyright (C) 2013-2016 Adrien Cossa <souliane@mailoo.org>
7
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Affero General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Affero General Public License for more details.
17
18 # You should have received a copy of the GNU Affero General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20
21 from sat.core.i18n import _, D_
22 from sat_frontends.tools.strings import addURLToText, fixXHTMLLinks
23 from sat.core.log import getLogger
24
25 log = getLogger(__name__)
26 from sat.tools.common import data_format
27 from sat.tools import xml_tools
28 from dbus.exceptions import DBusException
29 from twisted.internet import defer
30 from twisted.web import server
31 from twisted.web.resource import Resource
32 from twisted.words.protocols.jabber.jid import JID
33 from twisted.words.xish import domish
34 from jinja2 import Environment, PackageLoader
35 from datetime import datetime
36 import re
37 import os
38 import sys
39 import urllib
40
41 from libervia.server.html_tools import sanitizeHtml, convertNewLinesToXHTML
42 from libervia.server.constants import Const as C
43
44 NS_ATOM = "http://www.w3.org/2005/Atom"
45 PARAMS_TO_GET = (
46 C.STATIC_BLOG_PARAM_TITLE,
47 C.STATIC_BLOG_PARAM_BANNER,
48 C.STATIC_BLOG_PARAM_KEYWORDS,
49 C.STATIC_BLOG_PARAM_DESCRIPTION,
50 )
51 re_strip_empty_div = re.compile(r"<div ?/>|<div> *?</div>")
52
53 # TODO: check disco features and use max_items when RSM is not available
54 # FIXME: change navigation links handling, this is fragile
55 # XXX: this page will disappear, LiberviaPage will be used instead
56 # TODO: delete this page and create a compatibility page for links
57
58
59 def getDefaultQueryData(request):
60 """Return query data which must be present in all links
61
62 @param request(twisted.web.http.Request): request instance coming from render
63 @return (dict): a dict with values as expected by urllib.urlencode
64 """
65 default_query_data = {}
66 try:
67 default_query_data["tag"] = request.extra_dict[
68 "mam_filter_{}".format(C.MAM_FILTER_CATEGORY)
69 ].encode("utf-8")
70 except KeyError:
71 pass
72 return default_query_data
73
74
75 def _quote(value):
76 """Quote a value for use in url
77
78 @param value(unicode): value to quote
79 @return (str): quoted value
80 """
81 return urllib.quote(value.encode("utf-8"), "")
82
83
84 def _unquote(quoted_value):
85 """Unquote a value coming from url
86
87 @param quoted_value(str): value to unquote
88 @return (unicode): unquoted value
89 """
90 assert not isinstance(quoted_value, unicode)
91 return urllib.unquote(quoted_value).decode("utf-8")
92
93
94 def _urlencode(query):
95 """Same as urllib.urlencode, but use '&amp;' instead of '&'"""
96 return "&amp;".join(
97 [
98 "{}={}".format(urllib.quote_plus(str(k)), urllib.quote_plus(str(v)))
99 for k, v in query.iteritems()
100 ]
101 )
102
103
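# Usage sketch for the quoting helpers above (hypothetical values):
# _quote/_unquote round-trip a unicode value through a URL path, while
# _urlencode builds XHTML-safe query strings.
#
#     tag = u"caf\xe9"
#     quoted = _quote(tag)            # 'caf%C3%A9' (str, safe in a URL path)
#     assert _unquote(quoted) == tag  # back to the original unicode value
#     _urlencode({"max": 10, "tag": "news"})
#     # -> e.g. 'max=10&amp;tag=news' (dict order is arbitrary); the '&amp;'
#     #    keeps the link valid once embedded in an XHTML template
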
104 class TemplateProcessor(object):
105
106 THEME = "default"
107
108 def __init__(self, host):
109 self.host = host
110
111 # add Libervia's themes directory to the python path
112 sys.path.append(os.path.dirname(os.path.normpath(self.host.themes_dir)))
113 themes = os.path.basename(os.path.normpath(self.host.themes_dir))
114 self.env = Environment(loader=PackageLoader(themes, self.THEME))
115
116 def useTemplate(self, request, tpl, data=None):
117 theme_url = os.path.join("/", C.THEMES_URL, self.THEME)
118
119 data_ = {
120 "images": os.path.join(theme_url, "images"),
121 "styles": os.path.join(theme_url, "styles"),
122 }
123 if data:
124 data_.update(data)
125
126 template = self.env.get_template("{}.html".format(tpl))
127 return template.render(**data_).encode("utf-8")
128
129
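# Rendering sketch for useTemplate() above, assuming the themes directory is
# importable as a hypothetical package named "themes" (this is what the
# sys.path tweak in __init__ makes possible); paths and data are illustrative.
#
#     env = Environment(loader=PackageLoader("themes", "default"))
#     html = env.get_template("static_blog.html").render(
#         images="/themes/default/images",
#         styles="/themes/default/styles",
#         title=u"Louise's blog",
#     ).encode("utf-8")
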
130 class MicroBlog(Resource, TemplateProcessor):
131 isLeaf = True
132
133 def __init__(self, host):
134 self.host = host
135 Resource.__init__(self)
136 TemplateProcessor.__init__(self, host)
137 self.avatars_cache = {}
138
139 def _avatarPathToUrl(self, avatar, request, bare_jid_s):
140 filename = os.path.basename(avatar)
141 avatar_url = os.path.join(self.host.service_cache_url, filename)
142 self.avatars_cache[bare_jid_s] = avatar_url
143 return avatar_url
144
145 def getAvatarURL(self, pub_jid, request):
146 """Return avatar of a jid if in cache, else ask for it.
147
148 @param pub_jid (JID): publisher JID
149 @return: deferred avatar URL (unicode)
150 """
151 bare_jid_s = pub_jid.userhost()
152 try:
153 url = self.avatars_cache[bare_jid_s]
154 except KeyError:
155 self.avatars_cache[
156 bare_jid_s
157 ] = "" # avoid to request the vcard several times
158 d = self.host.bridgeCall(
159 "avatarGet", bare_jid_s, False, False, C.SERVICE_PROFILE
160 )
161 d.addCallback(self._avatarPathToUrl, request, bare_jid_s)
162 return d
163 return defer.succeed(url if url else C.DEFAULT_AVATAR_URL)
164
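# Usage sketch (hypothetical JID): the first call for a publisher triggers an
# "avatarGet" bridge call and caches the resulting URL in self.avatars_cache,
# so later calls resolve immediately.
#
#     d = self.getAvatarURL(JID(u"louise@example.org"), request)
#     d.addCallback(lambda url: log.debug(u"avatar URL: {}".format(url)))
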
165 def render_GET(self, request):
166 if not request.postpath or len(request.postpath) > 2:
167 return self.useTemplate(
168 request, "static_blog_error", {"message": "You must indicate a nickname"}
169 )
170
171 prof_requested = _unquote(request.postpath[0])
172
173 try:
174 prof_found = self.host.bridge.profileNameGet(prof_requested)
175 except DBusException:
176 prof_found = None
177 if not prof_found or prof_found == C.SERVICE_PROFILE:
178 return self.useTemplate(
179 request, "static_blog_error", {"message": "Invalid nickname"}
180 )
181
182 d = defer.Deferred()
183 # TODO: jid caching
184 self.host.bridge.asyncGetParamA(
185 "JabberID",
186 "Connection",
187 "value",
188 profile_key=prof_found,
189 callback=d.callback,
190 errback=d.errback,
191 )
192 d.addCallback(self.render_gotJID, request, prof_found)
193 return server.NOT_DONE_YET
194
195 def render_gotJID(self, pub_jid_s, request, profile):
196 pub_jid = JID(pub_jid_s)
197
198 request.extra_dict = {} # will be used for RSM and MAM
199 self.parseURLParams(request)
200 if request.item_id:
201 # FIXME: this part seems useless
202 # we want a specific item
203 # item_ids = [request.item_id]
204 # max_items = 1
205 max_items = C.NO_LIMIT # FIXME
206 else:
207 # max_items = int(request.extra_dict['rsm_max']) # FIXME
208 max_items = C.NO_LIMIT
209 # TODO: use max_items only when RSM is not available
210
211 if request.atom:
212 request.extra_dict.update(request.mam_extra)
213 self.getAtom(
214 pub_jid,
215 max_items,
216 request.extra_dict,
217 request.extra_comments_dict,
218 request,
219 profile,
220 )
221
222 elif request.item_id:
223 # we can't merge mam_extra now because we'll use item_ids
224 self.getItemById(
225 pub_jid,
226 request.item_id,
227 request.extra_dict,
228 request.extra_comments_dict,
229 request,
230 profile,
231 )
232 else:
233 request.extra_dict.update(request.mam_extra)
234 self.getItems(
235 pub_jid,
236 max_items,
237 request.extra_dict,
238 request.extra_comments_dict,
239 request,
240 profile,
241 )
242
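# Dispatch sketch for render_GET/render_gotJID (hypothetical URLs):
#     /blog/louise            -> getItems (HTML list view)
#     /blog/louise/atom.xml   -> getAtom (Atom feed)
#     /blog/louise/some-item  -> getItemById (HTML view of one item + comments)
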
243 ## URL parsing
244
245 def parseURLParams(self, request):
246 """Parse the request URL parameters.
247
248 @param request: HTTP request
249 """
250 if len(request.postpath) > 1:
251 if request.postpath[1] == "atom.xml": # return the atom feed
252 request.atom = True
253 request.item_id = None
254 else:
255 request.atom = False
256 request.item_id = _unquote(request.postpath[1])
257 else:
258 request.item_id = None
259 request.atom = False
260
261 self.parseURLParamsRSM(request)
262 # XXX: request.display_single is True when only one blog post is visible
263 request.display_single = (request.item_id is not None) or int(
264 request.extra_dict["rsm_max"]
265 ) == 1
266 self.parseURLParamsCommentsRSM(request)
267 self.parseURLParamsMAM(request)
268
269 def parseURLParamsRSM(self, request):
270 """Parse RSM request data from the URL parameters for main items
271
272 fill request.extra_dict accordingly
273 @param request: HTTP request
274 """
275 if request.item_id: # XXX: item_id and RSM are not compatible
276 return
277 try:
278 rsm_max = int(request.args["max"][0])
279 if rsm_max > C.STATIC_RSM_MAX_LIMIT:
280 log.warning(u"Request with rsm_max over limit ({})".format(rsm_max))
281 rsm_max = C.STATIC_RSM_MAX_LIMIT
282 request.extra_dict["rsm_max"] = unicode(rsm_max)
283 except (ValueError, KeyError):
284 request.extra_dict["rsm_max"] = unicode(C.STATIC_RSM_MAX_DEFAULT)
285 try:
286 request.extra_dict["rsm_index"] = request.args["index"][0]
287 except (ValueError, KeyError):
288 try:
289 request.extra_dict["rsm_before"] = request.args["before"][0].decode(
290 "utf-8"
291 )
292 except KeyError:
293 try:
294 request.extra_dict["rsm_after"] = request.args["after"][0].decode(
295 "utf-8"
296 )
297 except KeyError:
298 pass
299
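# Mapping sketch for the RSM parsing above (hypothetical queries; index,
# before and after are checked in that order and are mutually exclusive):
#     ?max=5&index=20  -> {'rsm_max': u'5', 'rsm_index': '20'}
#     ?before=item-42  -> {'rsm_max': unicode(C.STATIC_RSM_MAX_DEFAULT),
#                          'rsm_before': u'item-42'}
#     ?max=99999       -> rsm_max is capped to C.STATIC_RSM_MAX_LIMIT
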
300 def parseURLParamsCommentsRSM(self, request):
301 """Parse RSM request data from the URL parameters for comments
302
303 fill request.extra_dict accordingly
304 @param request: HTTP request
305 """
306 request.extra_comments_dict = {}
307 if request.display_single:
308 try:
309 rsm_max = int(request.args["comments_max"][0])
310 if rsm_max > C.STATIC_RSM_MAX_LIMIT:
311 log.warning(u"Request with rsm_max over limit ({})".format(rsm_max))
312 rsm_max = C.STATIC_RSM_MAX_LIMIT
313 request.extra_comments_dict["rsm_max"] = unicode(rsm_max)
314 except (ValueError, KeyError):
315 request.extra_comments_dict["rsm_max"] = unicode(
316 C.STATIC_RSM_MAX_COMMENTS_DEFAULT
317 )
318 else:
319 request.extra_comments_dict["rsm_max"] = "0"
320
321 def parseURLParamsMAM(self, request):
322 """Parse MAM request data from the URL parameters for main items
323
324 fill request.extra_dict accordingly
325 @param request: HTTP request
326 """
327 # XXX: we use a separate dict for MAM as its filters are not used
328 # when display_single is set (because it then uses item_ids, which
329 # can't be combined with MAM), but they are still needed in that case
330 # to build the navigation links.
331 request.mam_extra = {}
332 try:
333 request.mam_extra[
334 "mam_filter_{}".format(C.MAM_FILTER_CATEGORY)
335 ] = request.args["tag"][0].decode("utf-8")
336 except KeyError:
337 pass
338
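# e.g. a hypothetical request for /blog/louise?tag=xmpp ends up with
#     request.mam_extra == {u"mam_filter_" + C.MAM_FILTER_CATEGORY: u"xmpp"}
# and getDefaultQueryData() then propagates the "tag" parameter to every
# navigation link built for the page.
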
339 ## Items retrieval
340
341 def getItemById(
342 self, pub_jid, item_id, extra_dict, extra_comments_dict, request, profile
343 ):
344 """
345
346 @param pub_jid (jid.JID): publisher JID
347 @param item_id(unicode): ID of the item to retrieve
348 @param extra_dict (dict): extra configuration for initial items only
349 @param extra_comments_dict (dict): extra configuration for comments only
350 @param request: HTTP request
351 @param profile (unicode): %(doc_profile)s
352 """
353
354 def gotItems(items):
355 items, metadata = items
356 item = items[0] # assume there's only one item
357
358 def gotMetadata(result):
359 dummy, rsm_metadata = result
360 try:
361 metadata["rsm_count"] = rsm_metadata["rsm_count"]
362 except KeyError:
363 pass
364 try:
365 metadata["rsm_index"] = unicode(int(rsm_metadata["rsm_index"]) - 1)
366 except KeyError:
367 pass
368
369 metadata["rsm_first"] = metadata["rsm_last"] = item["id"]
370
371 def gotComments(comments):
372 # at this point we can merge mam dict
373 request.extra_dict.update(request.mam_extra)
374 # build the items as self.getItems would do it (and as self.renderHTML expects them to be)
375 comments = [
376 (
377 item["comments_service"],
378 item["comments_node"],
379 "",
380 comments[0],
381 comments[1],
382 )
383 ]
384 self.renderHTML(
385 [(item, comments)], metadata, request, pub_jid, profile
386 )
387
388 # get the comments
389 # max_comments = int(extra_comments_dict['rsm_max']) # FIXME
390 max_comments = C.NO_LIMIT
391 # TODO: use max_comments only when RSM is not available
392 self.host.bridge.mbGet(
393 item["comments_service"],
394 item["comments_node"],
395 max_comments,
396 [],
397 extra_comments_dict,
398 C.SERVICE_PROFILE,
399 callback=gotComments,
400 errback=lambda failure: self.renderError(failure, request, pub_jid),
401 )
402
403 # XXX: retrieve RSM information related to the main item. We can't do it while
404 # retrieving the item, because item_ids and rsm should not be used together.
405 self.host.bridge.mbGet(
406 pub_jid.userhost(),
407 "",
408 0,
409 [],
410 {"rsm_max": "1", "rsm_after": item["id"]},
411 C.SERVICE_PROFILE,
412 callback=gotMetadata,
413 errback=lambda failure: self.renderError(failure, request, pub_jid),
414 )
415
416 # get the main item
417 self.host.bridge.mbGet(
418 pub_jid.userhost(),
419 "",
420 0,
421 [item_id],
422 extra_dict,
423 C.SERVICE_PROFILE,
424 callback=gotItems,
425 errback=lambda failure: self.renderError(failure, request, pub_jid),
426 )
427
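# Call sequence sketch for getItemById: the three bridge calls are chained
# through callbacks:
#     mbGet(single item) -> gotItems
#     -> mbGet(rsm_max=1, rsm_after=item id) -> gotMetadata (page position)
#     -> mbGet(comments node) -> gotComments -> renderHTML
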
428 def getItems(
429 self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile
430 ):
431 """
432
433 @param pub_jid (jid.JID): publisher JID
434 @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit
435 @param extra_dict (dict): extra configuration for initial items only
436 @param extra_comments_dict (dict): extra configuration for comments only
437 @param request: HTTP request
438 @param profile (unicode): %(doc_profile)s
439 """
440
441 def getResultCb(data, rt_session):
442 remaining, results = data
443 # we have requested one node only
444 assert remaining == 0
445 assert len(results) == 1
446 service, node, failure, items, metadata = results[0]
447 if failure:
448 self.renderError(failure, request, pub_jid)
449 else:
450 self.renderHTML(items, metadata, request, pub_jid, profile)
451
452 def getResult(rt_session):
453 self.host.bridge.mbGetFromManyWithCommentsRTResult(
454 rt_session,
455 C.SERVICE_PROFILE,
456 callback=lambda data: getResultCb(data, rt_session),
457 errback=lambda failure: self.renderError(failure, request, pub_jid),
458 )
459
460 # max_comments = int(extra_comments_dict['rsm_max']) # FIXME
461 max_comments = 0
462 # TODO: use max_comments only when RSM is not available
463 self.host.bridge.mbGetFromManyWithComments(
464 C.JID,
465 [pub_jid.userhost()],
466 max_items,
467 max_comments,
468 extra_dict,
469 extra_comments_dict,
470 C.SERVICE_PROFILE,
471 callback=getResult,
472 )
473
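# Shape of the data received by getResultCb (illustrative, single node only):
#     remaining = 0
#     results = [(service, node, failure, items, metadata)]
# where items is the list of (item_data, comments_list) tuples documented in
# _renderHTML below.
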
474 def getAtom(
475 self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile
476 ):
477 """
478
479 @param pub_jid (jid.JID): publisher JID
480 @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit
481 @param extra_dict (dict): extra configuration for initial items only
482 @param extra_comments_dict (dict): extra configuration for comments only
483 @param request: HTTP request
484 @param profile (unicode): %(doc_profile)s
485 """
486
487 def gotItems(data):
488 # Generate a clean atom feed with uri linking to this blog
489 # from microblog data
490 items, metadata = data
491 feed_elt = domish.Element((NS_ATOM, u"feed"))
492 title = _(u"{user}'s blog").format(user=profile)
493 feed_elt.addElement(u"title", content=title)
494
495 base_blog_url = self.host.getExtBaseURL(
496 request, u"blog/{user}".format(user=profile)
497 )
498
499 # atom link
500 link_feed_elt = feed_elt.addElement("link")
501 link_feed_elt["href"] = u"{base}/atom.xml".format(base=base_blog_url)
502 link_feed_elt["type"] = u"application/atom+xml"
503 link_feed_elt["rel"] = u"self"
504
505 # blog link
506 link_blog_elt = feed_elt.addElement("link")
507 link_blog_elt["rel"] = u"alternate"
508 link_blog_elt["type"] = u"text/html"
509 link_blog_elt["href"] = base_blog_url
510
511 # blog link XMPP uri
512 blog_xmpp_uri = metadata["uri"]
513 link_blog_elt = feed_elt.addElement("link")
514 link_blog_elt["rel"] = u"alternate"
515 link_blog_elt["type"] = u"application/atom+xml"
516 link_blog_elt["href"] = blog_xmpp_uri
517
518 feed_elt.addElement("id", content=_quote(blog_xmpp_uri))
519 updated_unix = max([float(item["updated"]) for item in items])
520 updated_dt = datetime.fromtimestamp(updated_unix)
521 feed_elt.addElement(
522 u"updated", content=u"{}Z".format(updated_dt.isoformat("T"))
523 )
524
525 for item in items:
526 entry_elt = feed_elt.addElement(u"entry")
527
528 # Title
529 try:
530 title = item["title"]
531 except KeyError:
532 # for a microblog post (without title), we use an abstract of the content as title
533 title = u"{}…".format(u" ".join(item["content"][:70].split()))
534 entry_elt.addElement(u"title", content=title)
535
536 # HTTP link
537 http_link_elt = entry_elt.addElement(u"link")
538 http_link_elt["rel"] = u"alternate"
539 http_link_elt["type"] = u"text/html"
540 http_link_elt["href"] = u"{base}/{quoted_id}".format(
541 base=base_blog_url, quoted_id=_quote(item["id"])
542 )
543 # XMPP link
544 xmpp_link_elt = entry_elt.addElement(u"link")
545 xmpp_link_elt["rel"] = u"alternate"
546 xmpp_link_elt["type"] = u"application/atom+xml"
547 xmpp_link_elt["href"] = u"{blog_uri};item={item_id}".format(
548 blog_uri=blog_xmpp_uri, item_id=item["id"]
549 )
550
551 # date metadata
552 entry_elt.addElement(u"id", content=item["atom_id"])
553 updated = datetime.fromtimestamp(float(item["updated"]))
554 entry_elt.addElement(
555 u"updated", content=u"{}Z".format(updated.isoformat("T"))
556 )
557 published = datetime.fromtimestamp(float(item["published"]))
558 entry_elt.addElement(
559 u"published", content=u"{}Z".format(published.isoformat("T"))
560 )
561
562 # author metadata
563 author_elt = entry_elt.addElement(u"author")
564 author_elt.addElement("name", content=item.get("author", profile))
565 try:
566 author_elt.addElement(
567 "uri", content=u"xmpp:{}".format(item["author_jid"])
568 )
569 except KeyError:
570 pass
571 try:
572 author_elt.addElement("email", content=item["author_email"])
573 except KeyError:
574 pass
575
576 # categories
577 for tag in data_format.dict2iter("tag", item):
578 category_elt = entry_elt.addElement(u"category")
579 category_elt["term"] = tag
580
581 # content
582 try:
583 content_xhtml = item["content_xhtml"]
584 except KeyError:
585 content_elt = entry_elt.addElement("content", content=item["content"])
586 content_elt["type"] = "text"
587 else:
588 content_elt = entry_elt.addElement("content")
589 content_elt["type"] = "xhtml"
590 content_elt.addChild(
591 xml_tools.ElementParser()(content_xhtml, namespace=C.NS_XHTML)
592 )
593
594 atom_feed = u'<?xml version="1.0" encoding="utf-8"?>\n{}'.format(
595 feed_elt.toXml()
596 )
597 self.renderAtomFeed(atom_feed, request)
598
599 self.host.bridge.mbGet(
600 pub_jid.userhost(),
601 "",
602 max_items,
603 [],
604 extra_dict,
605 C.SERVICE_PROFILE,
606 callback=gotItems,
607 )
608
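# Sketch of one resulting <entry>, with illustrative values only:
#     <entry>
#       <title>Hello world…</title>
#       <link rel="alternate" type="text/html"
#             href="https://example.org/blog/louise/some%20id"/>
#       <link rel="alternate" type="application/atom+xml"
#             href="xmpp:pubsub.example.org?...;item=some id"/>
#       <id>some-atom-id</id>
#       <updated>2018-08-25T15:59:48Z</updated>
#       <published>2018-08-25T15:59:48Z</published>
#       <author><name>louise</name><uri>xmpp:louise@example.org</uri></author>
#       <content type="text">Hello world</content>
#     </entry>
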
609 ## rendering
610
611 def _updateDict(self, value, dict_, key):
612 dict_[key] = value
613
614 def _getImageParams(self, options, key, default, alt):
615 """regexp from http://answers.oreilly.com/topic/280-how-to-validate-urls-with-regular-expressions/"""
616 url = options[key] if key in options else ""
617 regexp = (
618 r"^(https?|ftp)://[a-z0-9-]+(\.[a-z0-9-]+)+(/[\w-]+)*/[\w-]+\.(gif|png|jpg)$"
619 )
620 if not re.match(regexp, url):
621 url = default
624 return BlogImage(url, alt)
625
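# Validation sketch for the regexp above (hypothetical values):
#     u"https://example.org/img/banner.png" -> accepted, used as is
#     u"javascript:alert(1)" or u""         -> rejected, the default image
#                                              (the avatar) is used instead
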
626 def renderError(self, failure, request, pub_jid):
627 request.setResponseCode(500)
628 request.write(
629 self.useTemplate(
630 request, "static_blog_error", {"message": "Can't access requested data"}
631 )
632 )
633 request.finish()
634
635 def renderHTML(self, items, metadata, request, pub_jid, profile):
636 """Retrieve the user parameters before actually rendering the static blog
637
638 @param items(list[tuple(dict, list)]): same as in self._renderHTML
639 @param metadata(dict): original node metadata
640 @param request: HTTP request
641 @param pub_jid (JID): publisher JID
642 @param profile (unicode): %(doc_profile)s
643 """
644 d_list = []
645 options = {}
646
647 d = self.getAvatarURL(pub_jid, request)
648 d.addCallback(self._updateDict, options, "avatar")
649 d.addErrback(self.renderError, request, pub_jid)
650 d_list.append(d)
651
652 for param_name in PARAMS_TO_GET:
653 d = defer.Deferred()
654 self.host.bridge.asyncGetParamA(
655 param_name,
656 C.STATIC_BLOG_KEY,
657 "value",
658 C.SERVER_SECURITY_LIMIT,
659 profile,
660 callback=d.callback,
661 errback=d.errback,
662 )
663 d.addCallback(self._updateDict, options, param_name)
664 d.addErrback(self.renderError, request, pub_jid)
665 d_list.append(d)
666
667 dlist_d = defer.DeferredList(d_list)
668 dlist_d.addCallback(
669 lambda dummy: self._renderHTML(items, metadata, options, request, pub_jid)
670 )
671
672 def _renderHTML(self, items, metadata, options, request, pub_jid):
673 """Actually render the static blog.
674
675 If items is a list of dicts, the comments are missing, so we only display
676 the main items. If items is a list of pairs, each pair associates a main
677 item with the list of its comments, so we render everything.
678 @param items(list[tuple(dict, list)]): list of 2-tuple with
679 - item(dict): item microblog data
680 - comments_list(list[tuple]): list of 5-tuple with
681 - service (unicode): pubsub service where the comments node is
682 - node (unicode): comments node
683 - failure (unicode): empty in case of success, else error message
684 - comments(list[dict]): list of microblog data
685 - comments_metadata(dict): metadata of the comment node
686 @param metadata(dict): original node metadata
687 @param options: dict defining the blog's parameters
688 @param request: the HTTP request
689 @param pub_jid (JID): publisher JID
690 """
691 if not isinstance(options, dict):
692 options = {}
693 user = sanitizeHtml(pub_jid.user)
694 base_url = os.path.join("/blog/", user)
695
696 def getOption(key):
697 return sanitizeHtml(options[key]) if key in options else ""
698
699 avatar = os.path.normpath("/{}".format(getOption("avatar")))
700 title = getOption(C.STATIC_BLOG_PARAM_TITLE) or user
701 query_data = _urlencode(getDefaultQueryData(request)).decode("utf-8")
702
703 xmpp_uri = metadata["uri"]
704 if len(items) == 1:
705 # FIXME: that's really not a good way to get item id
706 # this must be changed after the static blog refactoring
707 item_id = items[0][0]["id"]
708 xmpp_uri += u";item={}".format(_quote(item_id))
709
710 data = {
711 "url_base": base_url,
712 "xmpp_uri": xmpp_uri,
713 "url_query": u"?{}".format(query_data) if query_data else "",
714 "keywords": getOption(C.STATIC_BLOG_PARAM_KEYWORDS),
715 "description": getOption(C.STATIC_BLOG_PARAM_DESCRIPTION),
716 "title": title,
717 "favicon": avatar,
718 "banner_img": self._getImageParams(
719 options, C.STATIC_BLOG_PARAM_BANNER, avatar, title
720 ),
721 }
722
723 data["navlinks"] = NavigationLinks(request, items, metadata, base_url)
724 data["messages"] = []
725 for item in items:
726 item, comments_list = item
727 comments, comments_count = [], 0
728 for node_comments in comments_list:
729 comments.extend(node_comments[3])
730 try:
731 comments_count += int(node_comments[4]["rsm_count"])
732 except KeyError:
733 pass
734 data["messages"].append(
735 BlogMessage(request, base_url, item, comments, comments_count)
736 )
737
738 request.write(self.useTemplate(request, "static_blog", data))
739 request.finish()
740
741 def renderAtomFeed(self, feed, request):
742 request.write(feed.encode("utf-8"))
743 request.finish()
744
745
746 class NavigationLinks(object):
747 def __init__(self, request, items, metadata, base_url):
748 """Build the navigation links.
749
750 @param items (list): list of items
751 @param metadata (dict): rsm data
752 @param base_url (unicode): the base URL for this user's blog
753 Sets the later_message(s)/older_message(s) attributes used by the template.
754 """
755 # FIXME: this code must be refactored, it is fragile
756 # and difficult to maintain
757
758 # query data which must be present in all links
759 default_query_data = getDefaultQueryData(request)
760
761 # which links we need to display
762 if request.display_single:
763 links = ("later_message", "older_message")
764 # key must exist when using the template
765 self.later_messages = self.older_messages = ""
766 else:
767 links = ("later_messages", "older_messages")
768 self.later_message = self.older_message = ""
769
770 # now we set the links according to RSM
771 for key in links:
772 query_data = default_query_data.copy()
773
774 if key.startswith("later_message"):
775 try:
776 index = int(metadata["rsm_index"])
777 except (KeyError, ValueError):
778 pass
779 else:
780 if index == 0:
781 # we don't show this link on first page
782 setattr(self, key, "")
783 continue
784 try:
785 query_data["before"] = metadata["rsm_first"].encode("utf-8")
786 except KeyError:
787 pass
788 else:
789 try:
790 index = int(metadata["rsm_index"])
791 count = int(metadata["rsm_count"])
792 except (KeyError, ValueError):
793 # XXX: if we don't have index or count, we can't know if we
794 # are on the last page or not
795 pass
796 else:
797 # if we have index, we don't show the after link
798 # on the last page
799 if index + len(items) >= count:
800 setattr(self, key, "")
801 continue
802 try:
803 query_data["after"] = metadata["rsm_last"].encode("utf-8")
804 except KeyError:
805 pass
806
807 if request.display_single:
808 query_data["max"] = 1
809
810 link = "{}?{}".format(base_url, _urlencode(query_data))
811 setattr(self, key, BlogLink(link, key, key.replace("_", " ")))
812
813
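# e.g. on a hypothetical second page of a list view the template can use:
#     navlinks.later_messages -> BlogLink(u"/blog/louise?before=item-10...", ...)
#     navlinks.older_messages -> BlogLink(u"/blog/louise?after=item-19...", ...)
# while navlinks.later_message and navlinks.older_message are empty strings
# (the singular variants are only filled when display_single is set).
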
814 class BlogImage(object):
815 def __init__(self, url_, alt):
816 self.url = url_
817 self.alt = alt
818
819
820 class BlogLink(object):
821 def __init__(self, url_, style, text):
822 self.url = url_
823 self.style = style
824 self.text = text
825
826
827 class BlogMessage(object):
828 def __init__(self, request, base_url, entry, comments=None, comments_count=0):
829 """
830
831 @param request: HTTP request
832 @param base_url (unicode): the base URL
833 @param entry(dict): item microblog data
834 @param comments(list[dict]): list of microblog data
835 @param comments_count (int): total number of comments
836 """
837 if comments is None:
838 comments = []
839 timestamp = float(entry.get("published", 0))
840
841 # FIXME: for now we assume that the comments' depth is only 1
842 is_comment = not entry.get("comments", False)
843
844 self.date = datetime.fromtimestamp(timestamp)
845 self.type = "comment" if is_comment else "main_item"
846 self.style = "mblog_comment" if is_comment else ""
847 self.content = self.getText(entry, "content")
848
849 if is_comment:
850 self.author = _(u"from {}").format(entry["author"])
851 else:
852 self.author = "&nbsp;"
853 self.url = "{}/{}".format(base_url, _quote(entry["id"]))
854 query_data = getDefaultQueryData(request)
855 if query_data:
856 self.url += "?{}".format(_urlencode(query_data))
857 self.title = self.getText(entry, "title")
858 self.tags = [sanitizeHtml(tag) for tag in data_format.dict2iter("tag", entry)]
859
860 count_text = lambda count: D_(u"comments") if count > 1 else D_(u"comment")
861
862 self.comments_text = u"{} {}".format(
863 comments_count, count_text(comments_count)
864 )
865
866 delta = comments_count - len(comments)
867 if request.display_single and delta > 0:
868 prev_url = "{}?{}".format(
869 self.url, _urlencode({"comments_max": comments_count})
870 )
871 prev_text = D_(u"show {count} previous {comments}").format(
872 count=delta, comments=count_text(delta)
873 )
874 self.all_comments_link = BlogLink(prev_url, "comments_link", prev_text)
875
876 if comments:
877 self.comments = [
878 BlogMessage(request, base_url, comment) for comment in comments
879 ]
880
881 def getText(self, entry, key):
882 try:
883 xhtml = entry["{}_xhtml".format(key)]
884 except KeyError:
885 try:
886 processor = addURLToText if key.startswith("content") else sanitizeHtml
887 return convertNewLinesToXHTML(processor(entry[key]))
888 except KeyError:
889 return None
890 else:
891 # FIXME: empty <div /> elements cause rendering issues
892 # this regex is a temporary workaround, needs more investigation
893 xhtml = re_strip_empty_div.sub("", xhtml)
894 return fixXHTMLLinks(xhtml)
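# getText sketch (illustrative entries):
#     {u"content_xhtml": u"<p>hi</p><div/>"} -> u"<p>hi</p>", the empty <div/>
#         is stripped and links are adjusted by fixXHTMLLinks
#     {u"content": u"see http://example.org"} -> plain text processed by
#         addURLToText, then newlines converted by convertNewLinesToXHTML
#     {} -> None (no content at all)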