Mercurial > libervia-web
comparison src/server/blog.py @ 1113:cdd389ef97bc
server: code style reformatting using black
author | Goffi <goffi@goffi.org> |
---|---|
date | Fri, 29 Jun 2018 17:45:26 +0200 |
parents | f2170536ba23 |
children |
comparison
equal
deleted
inserted
replaced
1112:f287fc8bb31a | 1113:cdd389ef97bc |
---|---|
19 # along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 # along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | 20 |
21 from sat.core.i18n import _, D_ | 21 from sat.core.i18n import _, D_ |
22 from sat_frontends.tools.strings import addURLToText, fixXHTMLLinks | 22 from sat_frontends.tools.strings import addURLToText, fixXHTMLLinks |
23 from sat.core.log import getLogger | 23 from sat.core.log import getLogger |
24 | |
24 log = getLogger(__name__) | 25 log = getLogger(__name__) |
25 from sat.tools.common import data_format | 26 from sat.tools.common import data_format |
26 from sat.tools import xml_tools | 27 from sat.tools import xml_tools |
27 from dbus.exceptions import DBusException | 28 from dbus.exceptions import DBusException |
28 from twisted.internet import defer | 29 from twisted.internet import defer |
38 import urllib | 39 import urllib |
39 | 40 |
40 from libervia.server.html_tools import sanitizeHtml, convertNewLinesToXHTML | 41 from libervia.server.html_tools import sanitizeHtml, convertNewLinesToXHTML |
41 from libervia.server.constants import Const as C | 42 from libervia.server.constants import Const as C |
42 | 43 |
43 NS_ATOM = 'http://www.w3.org/2005/Atom' | 44 NS_ATOM = "http://www.w3.org/2005/Atom" |
44 PARAMS_TO_GET = (C.STATIC_BLOG_PARAM_TITLE, C.STATIC_BLOG_PARAM_BANNER, C.STATIC_BLOG_PARAM_KEYWORDS, C.STATIC_BLOG_PARAM_DESCRIPTION) | 45 PARAMS_TO_GET = ( |
46 C.STATIC_BLOG_PARAM_TITLE, | |
47 C.STATIC_BLOG_PARAM_BANNER, | |
48 C.STATIC_BLOG_PARAM_KEYWORDS, | |
49 C.STATIC_BLOG_PARAM_DESCRIPTION, | |
50 ) | |
45 re_strip_empty_div = re.compile(r"<div ?/>|<div> *?</div>") | 51 re_strip_empty_div = re.compile(r"<div ?/>|<div> *?</div>") |
46 | 52 |
47 # TODO: check disco features and use max_items when RSM is not available | 53 # TODO: check disco features and use max_items when RSM is not available |
48 # FIXME: change navigation links handling, this is is fragile | 54 # FIXME: change navigation links handling, this is is fragile |
49 # XXX: this page will disappear, LiberviaPage will be used instead | 55 # XXX: this page will disappear, LiberviaPage will be used instead |
56 @param request(twisted.web.http.Request): request instance comming from render | 62 @param request(twisted.web.http.Request): request instance comming from render |
57 @return (dict): a dict with values as expected by urllib.urlencode | 63 @return (dict): a dict with values as expected by urllib.urlencode |
58 """ | 64 """ |
59 default_query_data = {} | 65 default_query_data = {} |
60 try: | 66 try: |
61 default_query_data['tag'] = request.extra_dict['mam_filter_{}'.format(C.MAM_FILTER_CATEGORY)].encode('utf-8') | 67 default_query_data["tag"] = request.extra_dict[ |
68 "mam_filter_{}".format(C.MAM_FILTER_CATEGORY) | |
69 ].encode("utf-8") | |
62 except KeyError: | 70 except KeyError: |
63 pass | 71 pass |
64 return default_query_data | 72 return default_query_data |
65 | 73 |
66 | 74 |
68 """Quote a value for use in url | 76 """Quote a value for use in url |
69 | 77 |
70 @param value(unicode): value to quote | 78 @param value(unicode): value to quote |
71 @return (str): quoted value | 79 @return (str): quoted value |
72 """ | 80 """ |
73 return urllib.quote(value.encode('utf-8'), '') | 81 return urllib.quote(value.encode("utf-8"), "") |
74 | 82 |
75 | 83 |
76 def _unquote(quoted_value): | 84 def _unquote(quoted_value): |
77 """Unquote a value coming from url | 85 """Unquote a value coming from url |
78 | 86 |
79 @param unquote_value(str): value to unquote | 87 @param unquote_value(str): value to unquote |
80 @return (unicode): unquoted value | 88 @return (unicode): unquoted value |
81 """ | 89 """ |
82 assert not isinstance(quoted_value, unicode) | 90 assert not isinstance(quoted_value, unicode) |
83 return urllib.unquote(quoted_value).decode('utf-8') | 91 return urllib.unquote(quoted_value).decode("utf-8") |
84 | 92 |
85 | 93 |
86 def _urlencode(query): | 94 def _urlencode(query): |
87 """Same as urllib.urlencode, but use '&' instead of '&'""" | 95 """Same as urllib.urlencode, but use '&' instead of '&'""" |
88 return '&'.join(["{}={}".format(urllib.quote_plus(str(k)), urllib.quote_plus(str(v))) | 96 return "&".join( |
89 for k,v in query.iteritems()]) | 97 [ |
98 "{}={}".format(urllib.quote_plus(str(k)), urllib.quote_plus(str(v))) | |
99 for k, v in query.iteritems() | |
100 ] | |
101 ) | |
90 | 102 |
91 | 103 |
92 class TemplateProcessor(object): | 104 class TemplateProcessor(object): |
93 | 105 |
94 THEME = 'default' | 106 THEME = "default" |
95 | 107 |
96 def __init__(self, host): | 108 def __init__(self, host): |
97 self.host = host | 109 self.host = host |
98 | 110 |
99 # add Libervia's themes directory to the python path | 111 # add Libervia's themes directory to the python path |
100 sys.path.append(os.path.dirname(os.path.normpath(self.host.themes_dir))) | 112 sys.path.append(os.path.dirname(os.path.normpath(self.host.themes_dir))) |
101 themes = os.path.basename(os.path.normpath(self.host.themes_dir)) | 113 themes = os.path.basename(os.path.normpath(self.host.themes_dir)) |
102 self.env = Environment(loader=PackageLoader(themes, self.THEME)) | 114 self.env = Environment(loader=PackageLoader(themes, self.THEME)) |
103 | 115 |
104 def useTemplate(self, request, tpl, data=None): | 116 def useTemplate(self, request, tpl, data=None): |
105 theme_url = os.path.join('/', C.THEMES_URL, self.THEME) | 117 theme_url = os.path.join("/", C.THEMES_URL, self.THEME) |
106 | 118 |
107 data_ = {'images': os.path.join(theme_url, 'images'), | 119 data_ = { |
108 'styles': os.path.join(theme_url, 'styles'), | 120 "images": os.path.join(theme_url, "images"), |
109 } | 121 "styles": os.path.join(theme_url, "styles"), |
122 } | |
110 if data: | 123 if data: |
111 data_.update(data) | 124 data_.update(data) |
112 | 125 |
113 template = self.env.get_template('{}.html'.format(tpl)) | 126 template = self.env.get_template("{}.html".format(tpl)) |
114 return template.render(**data_).encode('utf-8') | 127 return template.render(**data_).encode("utf-8") |
115 | 128 |
116 | 129 |
117 class MicroBlog(Resource, TemplateProcessor): | 130 class MicroBlog(Resource, TemplateProcessor): |
118 isLeaf = True | 131 isLeaf = True |
119 | 132 |
127 filename = os.path.basename(avatar) | 140 filename = os.path.basename(avatar) |
128 avatar_url = os.path.join(self.host.service_cache_url, filename) | 141 avatar_url = os.path.join(self.host.service_cache_url, filename) |
129 self.avatars_cache[bare_jid_s] = avatar_url | 142 self.avatars_cache[bare_jid_s] = avatar_url |
130 return avatar_url | 143 return avatar_url |
131 | 144 |
132 | |
133 def getAvatarURL(self, pub_jid, request): | 145 def getAvatarURL(self, pub_jid, request): |
134 """Return avatar of a jid if in cache, else ask for it. | 146 """Return avatar of a jid if in cache, else ask for it. |
135 | 147 |
136 @param pub_jid (JID): publisher JID | 148 @param pub_jid (JID): publisher JID |
137 @return: deferred avatar URL (unicode) | 149 @return: deferred avatar URL (unicode) |
138 """ | 150 """ |
139 bare_jid_s = pub_jid.userhost() | 151 bare_jid_s = pub_jid.userhost() |
140 try: | 152 try: |
141 url = self.avatars_cache[bare_jid_s] | 153 url = self.avatars_cache[bare_jid_s] |
142 except KeyError: | 154 except KeyError: |
143 self.avatars_cache[bare_jid_s] = '' # avoid to request the vcard several times | 155 self.avatars_cache[ |
144 d = self.host.bridgeCall('avatarGet', bare_jid_s, False, False, C.SERVICE_PROFILE) | 156 bare_jid_s |
157 ] = "" # avoid to request the vcard several times | |
158 d = self.host.bridgeCall( | |
159 "avatarGet", bare_jid_s, False, False, C.SERVICE_PROFILE | |
160 ) | |
145 d.addCallback(self._avatarPathToUrl, request, bare_jid_s) | 161 d.addCallback(self._avatarPathToUrl, request, bare_jid_s) |
146 return d | 162 return d |
147 return defer.succeed(url if url else C.DEFAULT_AVATAR_URL) | 163 return defer.succeed(url if url else C.DEFAULT_AVATAR_URL) |
148 | 164 |
149 def render_GET(self, request): | 165 def render_GET(self, request): |
150 if not request.postpath or len(request.postpath) > 2: | 166 if not request.postpath or len(request.postpath) > 2: |
151 return self.useTemplate(request, "static_blog_error", {'message': "You must indicate a nickname"}) | 167 return self.useTemplate( |
168 request, "static_blog_error", {"message": "You must indicate a nickname"} | |
169 ) | |
152 | 170 |
153 prof_requested = _unquote(request.postpath[0]) | 171 prof_requested = _unquote(request.postpath[0]) |
154 | 172 |
155 try: | 173 try: |
156 prof_found = self.host.bridge.profileNameGet(prof_requested) | 174 prof_found = self.host.bridge.profileNameGet(prof_requested) |
157 except DBusException: | 175 except DBusException: |
158 prof_found = None | 176 prof_found = None |
159 if not prof_found or prof_found == C.SERVICE_PROFILE: | 177 if not prof_found or prof_found == C.SERVICE_PROFILE: |
160 return self.useTemplate(request, "static_blog_error", {'message': "Invalid nickname"}) | 178 return self.useTemplate( |
179 request, "static_blog_error", {"message": "Invalid nickname"} | |
180 ) | |
161 | 181 |
162 d = defer.Deferred() | 182 d = defer.Deferred() |
163 # TODO: jid caching | 183 # TODO: jid caching |
164 self.host.bridge.asyncGetParamA('JabberID', 'Connection', 'value', profile_key=prof_found, callback=d.callback, errback=d.errback) | 184 self.host.bridge.asyncGetParamA( |
185 "JabberID", | |
186 "Connection", | |
187 "value", | |
188 profile_key=prof_found, | |
189 callback=d.callback, | |
190 errback=d.errback, | |
191 ) | |
165 d.addCallback(self.render_gotJID, request, prof_found) | 192 d.addCallback(self.render_gotJID, request, prof_found) |
166 return server.NOT_DONE_YET | 193 return server.NOT_DONE_YET |
167 | 194 |
168 def render_gotJID(self, pub_jid_s, request, profile): | 195 def render_gotJID(self, pub_jid_s, request, profile): |
169 pub_jid = JID(pub_jid_s) | 196 pub_jid = JID(pub_jid_s) |
170 | 197 |
171 request.extra_dict = {} # will be used for RSM and MAM | 198 request.extra_dict = {} # will be used for RSM and MAM |
172 self.parseURLParams(request) | 199 self.parseURLParams(request) |
173 if request.item_id: | 200 if request.item_id: |
174 # FIXME: this part seems useless | 201 # FIXME: this part seems useless |
175 # we want a specific item | 202 # we want a specific item |
176 # item_ids = [request.item_id] | 203 # item_ids = [request.item_id] |
177 # max_items = 1 | 204 # max_items = 1 |
178 max_items = C.NO_LIMIT # FIXME | 205 max_items = C.NO_LIMIT # FIXME |
179 else: | 206 else: |
180 # max_items = int(request.extra_dict['rsm_max']) # FIXME | 207 # max_items = int(request.extra_dict['rsm_max']) # FIXME |
181 max_items = C.NO_LIMIT | 208 max_items = C.NO_LIMIT |
182 # TODO: use max_items only when RSM is not available | 209 # TODO: use max_items only when RSM is not available |
183 | 210 |
184 if request.atom: | 211 if request.atom: |
185 request.extra_dict.update(request.mam_extra) | 212 request.extra_dict.update(request.mam_extra) |
186 self.getAtom(pub_jid, max_items, request.extra_dict, request.extra_comments_dict, request, profile) | 213 self.getAtom( |
214 pub_jid, | |
215 max_items, | |
216 request.extra_dict, | |
217 request.extra_comments_dict, | |
218 request, | |
219 profile, | |
220 ) | |
187 | 221 |
188 elif request.item_id: | 222 elif request.item_id: |
189 # we can't merge mam_extra now because we'll use item_ids | 223 # we can't merge mam_extra now because we'll use item_ids |
190 self.getItemById(pub_jid, request.item_id, request.extra_dict, | 224 self.getItemById( |
191 request.extra_comments_dict, request, profile) | 225 pub_jid, |
226 request.item_id, | |
227 request.extra_dict, | |
228 request.extra_comments_dict, | |
229 request, | |
230 profile, | |
231 ) | |
192 else: | 232 else: |
193 request.extra_dict.update(request.mam_extra) | 233 request.extra_dict.update(request.mam_extra) |
194 self.getItems(pub_jid, max_items, request.extra_dict, | 234 self.getItems( |
195 request.extra_comments_dict, request, profile) | 235 pub_jid, |
236 max_items, | |
237 request.extra_dict, | |
238 request.extra_comments_dict, | |
239 request, | |
240 profile, | |
241 ) | |
196 | 242 |
197 ## URL parsing | 243 ## URL parsing |
198 | 244 |
199 def parseURLParams(self, request): | 245 def parseURLParams(self, request): |
200 """Parse the request URL parameters. | 246 """Parse the request URL parameters. |
201 | 247 |
202 @param request: HTTP request | 248 @param request: HTTP request |
203 """ | 249 """ |
204 if len(request.postpath) > 1: | 250 if len(request.postpath) > 1: |
205 if request.postpath[1] == 'atom.xml': # return the atom feed | 251 if request.postpath[1] == "atom.xml": # return the atom feed |
206 request.atom = True | 252 request.atom = True |
207 request.item_id = None | 253 request.item_id = None |
208 else: | 254 else: |
209 request.atom = False | 255 request.atom = False |
210 request.item_id = _unquote(request.postpath[1]) | 256 request.item_id = _unquote(request.postpath[1]) |
212 request.item_id = None | 258 request.item_id = None |
213 request.atom = False | 259 request.atom = False |
214 | 260 |
215 self.parseURLParamsRSM(request) | 261 self.parseURLParamsRSM(request) |
216 # XXX: request.display_single is True when only one blog post is visible | 262 # XXX: request.display_single is True when only one blog post is visible |
217 request.display_single = (request.item_id is not None) or int(request.extra_dict['rsm_max']) == 1 | 263 request.display_single = (request.item_id is not None) or int( |
264 request.extra_dict["rsm_max"] | |
265 ) == 1 | |
218 self.parseURLParamsCommentsRSM(request) | 266 self.parseURLParamsCommentsRSM(request) |
219 self.parseURLParamsMAM(request) | 267 self.parseURLParamsMAM(request) |
220 | 268 |
221 def parseURLParamsRSM(self, request): | 269 def parseURLParamsRSM(self, request): |
222 """Parse RSM request data from the URL parameters for main items | 270 """Parse RSM request data from the URL parameters for main items |
225 @param request: HTTP request | 273 @param request: HTTP request |
226 """ | 274 """ |
227 if request.item_id: # XXX: item_id and RSM are not compatible | 275 if request.item_id: # XXX: item_id and RSM are not compatible |
228 return | 276 return |
229 try: | 277 try: |
230 rsm_max = int(request.args['max'][0]) | 278 rsm_max = int(request.args["max"][0]) |
231 if rsm_max > C.STATIC_RSM_MAX_LIMIT: | 279 if rsm_max > C.STATIC_RSM_MAX_LIMIT: |
232 log.warning(u"Request with rsm_max over limit ({})".format(rsm_max)) | 280 log.warning(u"Request with rsm_max over limit ({})".format(rsm_max)) |
233 rsm_max = C.STATIC_RSM_MAX_LIMIT | 281 rsm_max = C.STATIC_RSM_MAX_LIMIT |
234 request.extra_dict['rsm_max'] = unicode(rsm_max) | 282 request.extra_dict["rsm_max"] = unicode(rsm_max) |
235 except (ValueError, KeyError): | 283 except (ValueError, KeyError): |
236 request.extra_dict['rsm_max'] = unicode(C.STATIC_RSM_MAX_DEFAULT) | 284 request.extra_dict["rsm_max"] = unicode(C.STATIC_RSM_MAX_DEFAULT) |
237 try: | 285 try: |
238 request.extra_dict['rsm_index'] = request.args['index'][0] | 286 request.extra_dict["rsm_index"] = request.args["index"][0] |
239 except (ValueError, KeyError): | 287 except (ValueError, KeyError): |
240 try: | 288 try: |
241 request.extra_dict['rsm_before'] = request.args['before'][0].decode('utf-8') | 289 request.extra_dict["rsm_before"] = request.args["before"][0].decode( |
290 "utf-8" | |
291 ) | |
242 except KeyError: | 292 except KeyError: |
243 try: | 293 try: |
244 request.extra_dict['rsm_after'] = request.args['after'][0].decode('utf-8') | 294 request.extra_dict["rsm_after"] = request.args["after"][0].decode( |
295 "utf-8" | |
296 ) | |
245 except KeyError: | 297 except KeyError: |
246 pass | 298 pass |
247 | 299 |
248 def parseURLParamsCommentsRSM(self, request): | 300 def parseURLParamsCommentsRSM(self, request): |
249 """Parse RSM request data from the URL parameters for comments | 301 """Parse RSM request data from the URL parameters for comments |
252 @param request: HTTP request | 304 @param request: HTTP request |
253 """ | 305 """ |
254 request.extra_comments_dict = {} | 306 request.extra_comments_dict = {} |
255 if request.display_single: | 307 if request.display_single: |
256 try: | 308 try: |
257 rsm_max = int(request.args['comments_max'][0]) | 309 rsm_max = int(request.args["comments_max"][0]) |
258 if rsm_max > C.STATIC_RSM_MAX_LIMIT: | 310 if rsm_max > C.STATIC_RSM_MAX_LIMIT: |
259 log.warning(u"Request with rsm_max over limit ({})".format(rsm_max)) | 311 log.warning(u"Request with rsm_max over limit ({})".format(rsm_max)) |
260 rsm_max = C.STATIC_RSM_MAX_LIMIT | 312 rsm_max = C.STATIC_RSM_MAX_LIMIT |
261 request.extra_comments_dict['rsm_max'] = unicode(rsm_max) | 313 request.extra_comments_dict["rsm_max"] = unicode(rsm_max) |
262 except (ValueError, KeyError): | 314 except (ValueError, KeyError): |
263 request.extra_comments_dict['rsm_max'] = unicode(C.STATIC_RSM_MAX_COMMENTS_DEFAULT) | 315 request.extra_comments_dict["rsm_max"] = unicode( |
316 C.STATIC_RSM_MAX_COMMENTS_DEFAULT | |
317 ) | |
264 else: | 318 else: |
265 request.extra_comments_dict['rsm_max'] = "0" | 319 request.extra_comments_dict["rsm_max"] = "0" |
266 | 320 |
267 def parseURLParamsMAM(self, request): | 321 def parseURLParamsMAM(self, request): |
268 """Parse MAM request data from the URL parameters for main items | 322 """Parse MAM request data from the URL parameters for main items |
269 | 323 |
270 fill request.extra_dict accordingly | 324 fill request.extra_dict accordingly |
274 # when display_single is set (because it then use item_ids which | 328 # when display_single is set (because it then use item_ids which |
275 # can't be used with MAM), but it is still used in this case | 329 # can't be used with MAM), but it is still used in this case |
276 # for navigation links. | 330 # for navigation links. |
277 request.mam_extra = {} | 331 request.mam_extra = {} |
278 try: | 332 try: |
279 request.mam_extra['mam_filter_{}'.format(C.MAM_FILTER_CATEGORY)] = request.args['tag'][0].decode('utf-8') | 333 request.mam_extra[ |
334 "mam_filter_{}".format(C.MAM_FILTER_CATEGORY) | |
335 ] = request.args["tag"][0].decode("utf-8") | |
280 except KeyError: | 336 except KeyError: |
281 pass | 337 pass |
282 | 338 |
283 ## Items retrieval | 339 ## Items retrieval |
284 | 340 |
285 def getItemById(self, pub_jid, item_id, extra_dict, extra_comments_dict, request, profile): | 341 def getItemById( |
342 self, pub_jid, item_id, extra_dict, extra_comments_dict, request, profile | |
343 ): | |
286 """ | 344 """ |
287 | 345 |
288 @param pub_jid (jid.JID): publisher JID | 346 @param pub_jid (jid.JID): publisher JID |
289 @param item_id(unicode): ID of the item to retrieve | 347 @param item_id(unicode): ID of the item to retrieve |
290 @param extra_dict (dict): extra configuration for initial items only | 348 @param extra_dict (dict): extra configuration for initial items only |
298 item = items[0] # assume there's only one item | 356 item = items[0] # assume there's only one item |
299 | 357 |
300 def gotMetadata(result): | 358 def gotMetadata(result): |
301 dummy, rsm_metadata = result | 359 dummy, rsm_metadata = result |
302 try: | 360 try: |
303 metadata['rsm_count'] = rsm_metadata['rsm_count'] | 361 metadata["rsm_count"] = rsm_metadata["rsm_count"] |
304 except KeyError: | 362 except KeyError: |
305 pass | 363 pass |
306 try: | 364 try: |
307 metadata['rsm_index'] = unicode(int(rsm_metadata['rsm_index'])-1) | 365 metadata["rsm_index"] = unicode(int(rsm_metadata["rsm_index"]) - 1) |
308 except KeyError: | 366 except KeyError: |
309 pass | 367 pass |
310 | 368 |
311 metadata['rsm_first'] = metadata['rsm_last'] = item["id"] | 369 metadata["rsm_first"] = metadata["rsm_last"] = item["id"] |
312 | 370 |
313 def gotComments(comments): | 371 def gotComments(comments): |
314 # at this point we can merge mam dict | 372 # at this point we can merge mam dict |
315 request.extra_dict.update(request.mam_extra) | 373 request.extra_dict.update(request.mam_extra) |
316 # build the items as self.getItems would do it (and as self.renderHTML expects them to be) | 374 # build the items as self.getItems would do it (and as self.renderHTML expects them to be) |
317 comments = [(item['comments_service'], item['comments_node'], "", comments[0], comments[1])] | 375 comments = [ |
318 self.renderHTML([(item, comments)], metadata, request, pub_jid, profile) | 376 ( |
377 item["comments_service"], | |
378 item["comments_node"], | |
379 "", | |
380 comments[0], | |
381 comments[1], | |
382 ) | |
383 ] | |
384 self.renderHTML( | |
385 [(item, comments)], metadata, request, pub_jid, profile | |
386 ) | |
319 | 387 |
320 # get the comments | 388 # get the comments |
321 # max_comments = int(extra_comments_dict['rsm_max']) # FIXME | 389 # max_comments = int(extra_comments_dict['rsm_max']) # FIXME |
322 max_comments = C.NO_LIMIT | 390 max_comments = C.NO_LIMIT |
323 # TODO: use max_comments only when RSM is not available | 391 # TODO: use max_comments only when RSM is not available |
324 self.host.bridge.mbGet(item['comments_service'], item['comments_node'], max_comments, [], | 392 self.host.bridge.mbGet( |
325 extra_comments_dict, C.SERVICE_PROFILE, | 393 item["comments_service"], |
394 item["comments_node"], | |
395 max_comments, | |
396 [], | |
397 extra_comments_dict, | |
398 C.SERVICE_PROFILE, | |
326 callback=gotComments, | 399 callback=gotComments, |
327 errback=lambda failure: self.renderError(failure, request, pub_jid)) | 400 errback=lambda failure: self.renderError(failure, request, pub_jid), |
401 ) | |
328 | 402 |
329 # XXX: retrieve RSM information related to the main item. We can't do it while | 403 # XXX: retrieve RSM information related to the main item. We can't do it while |
330 # retrieving the item, because item_ids and rsm should not be used together. | 404 # retrieving the item, because item_ids and rsm should not be used together. |
331 self.host.bridge.mbGet(pub_jid.userhost(), '', 0, [], | 405 self.host.bridge.mbGet( |
332 {"rsm_max": "1", "rsm_after": item["id"]}, C.SERVICE_PROFILE, | 406 pub_jid.userhost(), |
407 "", | |
408 0, | |
409 [], | |
410 {"rsm_max": "1", "rsm_after": item["id"]}, | |
411 C.SERVICE_PROFILE, | |
333 callback=gotMetadata, | 412 callback=gotMetadata, |
334 errback=lambda failure: self.renderError(failure, request, pub_jid)) | 413 errback=lambda failure: self.renderError(failure, request, pub_jid), |
414 ) | |
335 | 415 |
336 # get the main item | 416 # get the main item |
337 self.host.bridge.mbGet(pub_jid.userhost(), '', 0, [item_id], | 417 self.host.bridge.mbGet( |
338 extra_dict, C.SERVICE_PROFILE, | 418 pub_jid.userhost(), |
419 "", | |
420 0, | |
421 [item_id], | |
422 extra_dict, | |
423 C.SERVICE_PROFILE, | |
339 callback=gotItems, | 424 callback=gotItems, |
340 errback=lambda failure: self.renderError(failure, request, pub_jid)) | 425 errback=lambda failure: self.renderError(failure, request, pub_jid), |
341 | 426 ) |
342 def getItems(self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile): | 427 |
428 def getItems( | |
429 self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile | |
430 ): | |
343 """ | 431 """ |
344 | 432 |
345 @param pub_jid (jid.JID): publisher JID | 433 @param pub_jid (jid.JID): publisher JID |
346 @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit | 434 @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit |
347 @param extra_dict (dict): extra configuration for initial items only | 435 @param extra_dict (dict): extra configuration for initial items only |
348 @param extra_comments_dict (dict): extra configuration for comments only | 436 @param extra_comments_dict (dict): extra configuration for comments only |
349 @param request: HTTP request | 437 @param request: HTTP request |
350 @param profile | 438 @param profile |
351 """ | 439 """ |
440 | |
352 def getResultCb(data, rt_session): | 441 def getResultCb(data, rt_session): |
353 remaining, results = data | 442 remaining, results = data |
354 # we have requested one node only | 443 # we have requested one node only |
355 assert remaining == 0 | 444 assert remaining == 0 |
356 assert len(results) == 1 | 445 assert len(results) == 1 |
359 self.renderError(failure, request, pub_jid) | 448 self.renderError(failure, request, pub_jid) |
360 else: | 449 else: |
361 self.renderHTML(items, metadata, request, pub_jid, profile) | 450 self.renderHTML(items, metadata, request, pub_jid, profile) |
362 | 451 |
363 def getResult(rt_session): | 452 def getResult(rt_session): |
364 self.host.bridge.mbGetFromManyWithCommentsRTResult(rt_session, C.SERVICE_PROFILE, | 453 self.host.bridge.mbGetFromManyWithCommentsRTResult( |
365 callback=lambda data: getResultCb(data, rt_session), | 454 rt_session, |
366 errback=lambda failure: self.renderError(failure, request, pub_jid)) | 455 C.SERVICE_PROFILE, |
456 callback=lambda data: getResultCb(data, rt_session), | |
457 errback=lambda failure: self.renderError(failure, request, pub_jid), | |
458 ) | |
367 | 459 |
368 # max_comments = int(extra_comments_dict['rsm_max']) # FIXME | 460 # max_comments = int(extra_comments_dict['rsm_max']) # FIXME |
369 max_comments = 0 | 461 max_comments = 0 |
370 # TODO: use max_comments only when RSM is not available | 462 # TODO: use max_comments only when RSM is not available |
371 self.host.bridge.mbGetFromManyWithComments(C.JID, [pub_jid.userhost()], max_items, | 463 self.host.bridge.mbGetFromManyWithComments( |
372 max_comments, extra_dict, extra_comments_dict, | 464 C.JID, |
373 C.SERVICE_PROFILE, callback=getResult) | 465 [pub_jid.userhost()], |
374 | 466 max_items, |
375 def getAtom(self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile): | 467 max_comments, |
468 extra_dict, | |
469 extra_comments_dict, | |
470 C.SERVICE_PROFILE, | |
471 callback=getResult, | |
472 ) | |
473 | |
474 def getAtom( | |
475 self, pub_jid, max_items, extra_dict, extra_comments_dict, request, profile | |
476 ): | |
376 """ | 477 """ |
377 | 478 |
378 @param pub_jid (jid.JID): publisher JID | 479 @param pub_jid (jid.JID): publisher JID |
379 @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit | 480 @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit |
380 @param extra_dict (dict): extra configuration for initial items only | 481 @param extra_dict (dict): extra configuration for initial items only |
381 @param extra_comments_dict (dict): extra configuration for comments only | 482 @param extra_comments_dict (dict): extra configuration for comments only |
382 @param request: HTTP request | 483 @param request: HTTP request |
383 @param profile | 484 @param profile |
384 """ | 485 """ |
486 | |
385 def gotItems(data): | 487 def gotItems(data): |
386 # Generate a clean atom feed with uri linking to this blog | 488 # Generate a clean atom feed with uri linking to this blog |
387 # from microblog data | 489 # from microblog data |
388 items, metadata= data | 490 items, metadata = data |
389 feed_elt = domish.Element((NS_ATOM, u'feed')) | 491 feed_elt = domish.Element((NS_ATOM, u"feed")) |
390 title = _(u"{user}'s blog").format(user=profile) | 492 title = _(u"{user}'s blog").format(user=profile) |
391 feed_elt.addElement(u'title', content=title) | 493 feed_elt.addElement(u"title", content=title) |
392 | 494 |
393 base_blog_url = self.host.getExtBaseURL(request, | 495 base_blog_url = self.host.getExtBaseURL( |
394 u'blog/{user}'.format(user=profile)) | 496 request, u"blog/{user}".format(user=profile) |
497 ) | |
395 | 498 |
396 # atom link | 499 # atom link |
397 link_feed_elt = feed_elt.addElement('link') | 500 link_feed_elt = feed_elt.addElement("link") |
398 link_feed_elt['href'] = u'{base}/atom.xml'.format(base=base_blog_url) | 501 link_feed_elt["href"] = u"{base}/atom.xml".format(base=base_blog_url) |
399 link_feed_elt['type'] = u'application/atom+xml' | 502 link_feed_elt["type"] = u"application/atom+xml" |
400 link_feed_elt['rel'] = u'self' | 503 link_feed_elt["rel"] = u"self" |
401 | 504 |
402 # blog link | 505 # blog link |
403 link_blog_elt = feed_elt.addElement('link') | 506 link_blog_elt = feed_elt.addElement("link") |
404 link_blog_elt['rel'] = u'alternate' | 507 link_blog_elt["rel"] = u"alternate" |
405 link_blog_elt['type'] = u'text/html' | 508 link_blog_elt["type"] = u"text/html" |
406 link_blog_elt['href'] = base_blog_url | 509 link_blog_elt["href"] = base_blog_url |
407 | 510 |
408 # blog link XMPP uri | 511 # blog link XMPP uri |
409 blog_xmpp_uri = metadata['uri'] | 512 blog_xmpp_uri = metadata["uri"] |
410 link_blog_elt = feed_elt.addElement('link') | 513 link_blog_elt = feed_elt.addElement("link") |
411 link_blog_elt['rel'] = u'alternate' | 514 link_blog_elt["rel"] = u"alternate" |
412 link_blog_elt['type'] = u'application/atom+xml' | 515 link_blog_elt["type"] = u"application/atom+xml" |
413 link_blog_elt['href'] = blog_xmpp_uri | 516 link_blog_elt["href"] = blog_xmpp_uri |
414 | 517 |
415 feed_elt.addElement('id', content=_quote(blog_xmpp_uri)) | 518 feed_elt.addElement("id", content=_quote(blog_xmpp_uri)) |
416 updated_unix = max([float(item['updated']) for item in items]) | 519 updated_unix = max([float(item["updated"]) for item in items]) |
417 updated_dt = datetime.fromtimestamp(updated_unix) | 520 updated_dt = datetime.fromtimestamp(updated_unix) |
418 feed_elt.addElement(u'updated', content=u'{}Z'.format(updated_dt.isoformat("T"))) | 521 feed_elt.addElement( |
522 u"updated", content=u"{}Z".format(updated_dt.isoformat("T")) | |
523 ) | |
419 | 524 |
420 for item in items: | 525 for item in items: |
421 entry_elt = feed_elt.addElement(u'entry') | 526 entry_elt = feed_elt.addElement(u"entry") |
422 | 527 |
423 # Title | 528 # Title |
424 try: | 529 try: |
425 title = item['title'] | 530 title = item["title"] |
426 except KeyError: | 531 except KeyError: |
427 # for microblog (without title), we use an abstract of content as title | 532 # for microblog (without title), we use an abstract of content as title |
428 title = u'{}…'.format(u' '.join(item['content'][:70].split())) | 533 title = u"{}…".format(u" ".join(item["content"][:70].split())) |
429 entry_elt.addElement(u'title', content=title) | 534 entry_elt.addElement(u"title", content=title) |
430 | 535 |
431 # HTTP link | 536 # HTTP link |
432 http_link_elt = entry_elt.addElement(u'link') | 537 http_link_elt = entry_elt.addElement(u"link") |
433 http_link_elt['rel'] = u'alternate' | 538 http_link_elt["rel"] = u"alternate" |
434 http_link_elt['type'] = u'text/html' | 539 http_link_elt["type"] = u"text/html" |
435 http_link_elt['href'] = u'{base}/{quoted_id}'.format(base=base_blog_url, quoted_id=_quote(item['id'])) | 540 http_link_elt["href"] = u"{base}/{quoted_id}".format( |
541 base=base_blog_url, quoted_id=_quote(item["id"]) | |
542 ) | |
436 # XMPP link | 543 # XMPP link |
437 xmpp_link_elt = entry_elt.addElement(u'link') | 544 xmpp_link_elt = entry_elt.addElement(u"link") |
438 xmpp_link_elt['rel'] = u'alternate' | 545 xmpp_link_elt["rel"] = u"alternate" |
439 xmpp_link_elt['type'] = u'application/atom+xml' | 546 xmpp_link_elt["type"] = u"application/atom+xml" |
440 xmpp_link_elt['href'] = u'{blog_uri};item={item_id}'.format(blog_uri=blog_xmpp_uri, item_id=item['id']) | 547 xmpp_link_elt["href"] = u"{blog_uri};item={item_id}".format( |
548 blog_uri=blog_xmpp_uri, item_id=item["id"] | |
549 ) | |
441 | 550 |
442 # date metadata | 551 # date metadata |
443 entry_elt.addElement(u'id', content=item['atom_id']) | 552 entry_elt.addElement(u"id", content=item["atom_id"]) |
444 updated = datetime.fromtimestamp(float(item['updated'])) | 553 updated = datetime.fromtimestamp(float(item["updated"])) |
445 entry_elt.addElement(u'updated', content=u'{}Z'.format(updated.isoformat("T"))) | 554 entry_elt.addElement( |
446 published = datetime.fromtimestamp(float(item['published'])) | 555 u"updated", content=u"{}Z".format(updated.isoformat("T")) |
447 entry_elt.addElement(u'published', content=u'{}Z'.format(published.isoformat("T"))) | 556 ) |
557 published = datetime.fromtimestamp(float(item["published"])) | |
558 entry_elt.addElement( | |
559 u"published", content=u"{}Z".format(published.isoformat("T")) | |
560 ) | |
448 | 561 |
449 # author metadata | 562 # author metadata |
450 author_elt = entry_elt.addElement(u'author') | 563 author_elt = entry_elt.addElement(u"author") |
451 author_elt.addElement('name', content=item.get('author', profile)) | 564 author_elt.addElement("name", content=item.get("author", profile)) |
452 try: | 565 try: |
453 author_elt.addElement('uri', content=u'xmpp:{}'.format(item['author_jid'])) | 566 author_elt.addElement( |
454 except KeyError: | 567 "uri", content=u"xmpp:{}".format(item["author_jid"]) |
455 pass | 568 ) |
456 try: | 569 except KeyError: |
457 author_elt.addElement('email', content=item['author_email']) | 570 pass |
571 try: | |
572 author_elt.addElement("email", content=item["author_email"]) | |
458 except KeyError: | 573 except KeyError: |
459 pass | 574 pass |
460 | 575 |
461 # categories | 576 # categories |
462 for tag in data_format.dict2iter("tag", item): | 577 for tag in data_format.dict2iter("tag", item): |
463 category_elt = entry_elt.addElement(u"category") | 578 category_elt = entry_elt.addElement(u"category") |
464 category_elt["term"] = tag | 579 category_elt["term"] = tag |
465 | 580 |
466 # content | 581 # content |
467 try: | 582 try: |
468 content_xhtml = item['content_xhtml'] | 583 content_xhtml = item["content_xhtml"] |
469 except KeyError: | 584 except KeyError: |
470 content_elt = entry_elt.addElement('content', content='content') | 585 content_elt = entry_elt.addElement("content", content="content") |
471 content_elt['type'] = 'text' | 586 content_elt["type"] = "text" |
472 else: | 587 else: |
473 content_elt = entry_elt.addElement('content') | 588 content_elt = entry_elt.addElement("content") |
474 content_elt['type'] = 'xhtml' | 589 content_elt["type"] = "xhtml" |
475 content_elt.addChild(xml_tools.ElementParser()(content_xhtml, namespace=C.NS_XHTML)) | 590 content_elt.addChild( |
476 | 591 xml_tools.ElementParser()(content_xhtml, namespace=C.NS_XHTML) |
477 atom_feed = u'<?xml version="1.0" encoding="utf-8"?>\n{}'.format(feed_elt.toXml()) | 592 ) |
593 | |
594 atom_feed = u'<?xml version="1.0" encoding="utf-8"?>\n{}'.format( | |
595 feed_elt.toXml() | |
596 ) | |
478 self.renderAtomFeed(atom_feed, request), | 597 self.renderAtomFeed(atom_feed, request), |
479 | 598 |
480 self.host.bridge.mbGet(pub_jid.userhost(), '', max_items, [], extra_dict, C.SERVICE_PROFILE, callback=gotItems) | 599 self.host.bridge.mbGet( |
600 pub_jid.userhost(), | |
601 "", | |
602 max_items, | |
603 [], | |
604 extra_dict, | |
605 C.SERVICE_PROFILE, | |
606 callback=gotItems, | |
607 ) | |
481 | 608 |
482 ## rendering | 609 ## rendering |
483 | 610 |
484 def _updateDict(self, value, dict_, key): | 611 def _updateDict(self, value, dict_, key): |
485 dict_[key] = value | 612 dict_[key] = value |
486 | 613 |
def _getImageParams(self, options, key, default, alt):
    """Build a BlogImage from a user option, falling back to a default URL.

    regexp from http://answers.oreilly.com/topic/280-how-to-validate-urls-with-regular-expressions/

    @param options (dict): blog options (may lack key)
    @param key (unicode): option name holding the image URL
    @param default (unicode): URL to use when the option is missing or not a
        valid http(s)/ftp image URL
    @param alt (unicode): alternative text for the image
    @return (BlogImage): image data with a validated URL
    """
    url = options.get(key, "")
    regexp = (
        r"^(https?|ftp)://[a-z0-9-]+(\.[a-z0-9-]+)+(/[\w-]+)*/[\w-]+\.(gif|png|jpg)$"
    )
    # original code had a dead "url = url" branch here; keep the URL only
    # when it matches the whitelist pattern, otherwise use the default
    if not re.match(regexp, url):
        url = default
    return BlogImage(url, alt)
496 | 625 |
def renderError(self, failure, request, pub_jid):
    """Render a generic error page and close the request.

    @param failure: the failure that triggered the error (unused here)
    @param request: HTTP request
    @param pub_jid (JID): publisher JID (unused here)
    """
    request.setResponseCode(500)
    context = {"message": "Can't access requested data"}
    rendered = self.useTemplate(request, "static_blog_error", context)
    request.write(rendered)
    request.finish()
501 | 634 |
502 def renderHTML(self, items, metadata, request, pub_jid, profile): | 635 def renderHTML(self, items, metadata, request, pub_jid, profile): |
503 """Retrieve the user parameters before actually rendering the static blog | 636 """Retrieve the user parameters before actually rendering the static blog |
504 | 637 |
510 """ | 643 """ |
511 d_list = [] | 644 d_list = [] |
512 options = {} | 645 options = {} |
513 | 646 |
514 d = self.getAvatarURL(pub_jid, request) | 647 d = self.getAvatarURL(pub_jid, request) |
515 d.addCallback(self._updateDict, options, 'avatar') | 648 d.addCallback(self._updateDict, options, "avatar") |
516 d.addErrback(self.renderError, request, pub_jid) | 649 d.addErrback(self.renderError, request, pub_jid) |
517 d_list.append(d) | 650 d_list.append(d) |
518 | 651 |
519 for param_name in PARAMS_TO_GET: | 652 for param_name in PARAMS_TO_GET: |
520 d = defer.Deferred() | 653 d = defer.Deferred() |
521 self.host.bridge.asyncGetParamA(param_name, C.STATIC_BLOG_KEY, 'value', C.SERVER_SECURITY_LIMIT, profile, callback=d.callback, errback=d.errback) | 654 self.host.bridge.asyncGetParamA( |
655 param_name, | |
656 C.STATIC_BLOG_KEY, | |
657 "value", | |
658 C.SERVER_SECURITY_LIMIT, | |
659 profile, | |
660 callback=d.callback, | |
661 errback=d.errback, | |
662 ) | |
522 d.addCallback(self._updateDict, options, param_name) | 663 d.addCallback(self._updateDict, options, param_name) |
523 d.addErrback(self.renderError, request, pub_jid) | 664 d.addErrback(self.renderError, request, pub_jid) |
524 d_list.append(d) | 665 d_list.append(d) |
525 | 666 |
526 dlist_d = defer.DeferredList(d_list) | 667 dlist_d = defer.DeferredList(d_list) |
527 dlist_d.addCallback(lambda dummy: self._renderHTML(items, metadata, options, request, pub_jid)) | 668 dlist_d.addCallback( |
669 lambda dummy: self._renderHTML(items, metadata, options, request, pub_jid) | |
670 ) | |
528 | 671 |
529 def _renderHTML(self, items, metadata, options, request, pub_jid): | 672 def _renderHTML(self, items, metadata, options, request, pub_jid): |
530 """Actually render the static blog. | 673 """Actually render the static blog. |
531 | 674 |
532 If mblog_data is a list of dict, we are missing the comments items so we just | 675 If mblog_data is a list of dict, we are missing the comments items so we just |
546 @param pub_jid (JID): publisher JID | 689 @param pub_jid (JID): publisher JID |
547 """ | 690 """ |
548 if not isinstance(options, dict): | 691 if not isinstance(options, dict): |
549 options = {} | 692 options = {} |
550 user = sanitizeHtml(pub_jid.user) | 693 user = sanitizeHtml(pub_jid.user) |
551 base_url = os.path.join('/blog/',user) | 694 base_url = os.path.join("/blog/", user) |
552 | 695 |
553 def getOption(key): | 696 def getOption(key): |
554 return sanitizeHtml(options[key]) if key in options else '' | 697 return sanitizeHtml(options[key]) if key in options else "" |
555 | 698 |
556 avatar = os.path.normpath('/{}'.format(getOption('avatar'))) | 699 avatar = os.path.normpath("/{}".format(getOption("avatar"))) |
557 title = getOption(C.STATIC_BLOG_PARAM_TITLE) or user | 700 title = getOption(C.STATIC_BLOG_PARAM_TITLE) or user |
558 query_data = _urlencode(getDefaultQueryData(request)).decode('utf-8') | 701 query_data = _urlencode(getDefaultQueryData(request)).decode("utf-8") |
559 | 702 |
560 xmpp_uri = metadata['uri'] | 703 xmpp_uri = metadata["uri"] |
561 if len(items) == 1: | 704 if len(items) == 1: |
562 # FIXME: that's really not a good way to get item id | 705 # FIXME: that's really not a good way to get item id |
563 # this must be changed after static blog refactorisation | 706 # this must be changed after static blog refactorisation |
564 item_id = items[0][0]['id'] | 707 item_id = items[0][0]["id"] |
565 xmpp_uri+=u";item={}".format(_quote(item_id)) | 708 xmpp_uri += u";item={}".format(_quote(item_id)) |
566 | 709 |
567 data = {'url_base': base_url, | 710 data = { |
568 'xmpp_uri': xmpp_uri, | 711 "url_base": base_url, |
569 'url_query': u'?{}'.format(query_data) if query_data else '' , | 712 "xmpp_uri": xmpp_uri, |
570 'keywords': getOption(C.STATIC_BLOG_PARAM_KEYWORDS), | 713 "url_query": u"?{}".format(query_data) if query_data else "", |
571 'description': getOption(C.STATIC_BLOG_PARAM_DESCRIPTION), | 714 "keywords": getOption(C.STATIC_BLOG_PARAM_KEYWORDS), |
572 'title': title, | 715 "description": getOption(C.STATIC_BLOG_PARAM_DESCRIPTION), |
573 'favicon': avatar, | 716 "title": title, |
574 'banner_img': self._getImageParams(options, C.STATIC_BLOG_PARAM_BANNER, avatar, title) | 717 "favicon": avatar, |
575 } | 718 "banner_img": self._getImageParams( |
576 | 719 options, C.STATIC_BLOG_PARAM_BANNER, avatar, title |
577 data['navlinks'] = NavigationLinks(request, items, metadata, base_url) | 720 ), |
578 data['messages'] = [] | 721 } |
722 | |
723 data["navlinks"] = NavigationLinks(request, items, metadata, base_url) | |
724 data["messages"] = [] | |
579 for item in items: | 725 for item in items: |
580 item, comments_list = item | 726 item, comments_list = item |
581 comments, comments_count = [], 0 | 727 comments, comments_count = [], 0 |
582 for node_comments in comments_list: | 728 for node_comments in comments_list: |
583 comments.extend(node_comments[3]) | 729 comments.extend(node_comments[3]) |
584 try: | 730 try: |
585 comments_count += int(node_comments[4]['rsm_count']) | 731 comments_count += int(node_comments[4]["rsm_count"]) |
586 except KeyError: | 732 except KeyError: |
587 pass | 733 pass |
588 data['messages'].append(BlogMessage(request, base_url, item, comments, comments_count)) | 734 data["messages"].append( |
589 | 735 BlogMessage(request, base_url, item, comments, comments_count) |
590 request.write(self.useTemplate(request, 'static_blog', data)) | 736 ) |
737 | |
738 request.write(self.useTemplate(request, "static_blog", data)) | |
591 request.finish() | 739 request.finish() |
592 | 740 |
def renderAtomFeed(self, feed, request):
    """Write the Atom feed to the request as UTF-8 and close it.

    @param feed (unicode): complete serialized Atom feed
    @param request: HTTP request
    """
    encoded = feed.encode("utf-8")
    request.write(encoded)
    request.finish()
596 | 744 |
597 | 745 |
598 class NavigationLinks(object): | 746 class NavigationLinks(object): |
599 | |
600 def __init__(self, request, items, metadata, base_url): | 747 def __init__(self, request, items, metadata, base_url): |
601 """Build the navigation links. | 748 """Build the navigation links. |
602 | 749 |
603 @param items (list): list of items | 750 @param items (list): list of items |
604 @param metadata (dict): rsm data | 751 @param metadata (dict): rsm data |
611 # query data which must be present in all links | 758 # query data which must be present in all links |
612 default_query_data = getDefaultQueryData(request) | 759 default_query_data = getDefaultQueryData(request) |
613 | 760 |
614 # which links we need to display | 761 # which links we need to display |
615 if request.display_single: | 762 if request.display_single: |
616 links = ('later_message', 'older_message') | 763 links = ("later_message", "older_message") |
617 # key must exist when using the template | 764 # key must exist when using the template |
618 self.later_messages = self.older_messages = '' | 765 self.later_messages = self.older_messages = "" |
619 else: | 766 else: |
620 links = ('later_messages', 'older_messages') | 767 links = ("later_messages", "older_messages") |
621 self.later_message = self.older_message = '' | 768 self.later_message = self.older_message = "" |
622 | 769 |
623 # now we set the links according to RSM | 770 # now we set the links according to RSM |
624 for key in links: | 771 for key in links: |
625 query_data = default_query_data.copy() | 772 query_data = default_query_data.copy() |
626 | 773 |
627 if key.startswith('later_message'): | 774 if key.startswith("later_message"): |
628 try: | 775 try: |
629 index = int(metadata['rsm_index']) | 776 index = int(metadata["rsm_index"]) |
630 except (KeyError, ValueError): | 777 except (KeyError, ValueError): |
631 pass | 778 pass |
632 else: | 779 else: |
633 if index == 0: | 780 if index == 0: |
634 # we don't show this link on first page | 781 # we don't show this link on first page |
635 setattr(self, key, '') | 782 setattr(self, key, "") |
636 continue | 783 continue |
637 try: | 784 try: |
638 query_data['before'] = metadata['rsm_first'].encode('utf-8') | 785 query_data["before"] = metadata["rsm_first"].encode("utf-8") |
639 except KeyError: | 786 except KeyError: |
640 pass | 787 pass |
641 else: | 788 else: |
642 try: | 789 try: |
643 index = int(metadata['rsm_index']) | 790 index = int(metadata["rsm_index"]) |
644 count = int(metadata.get('rsm_count')) | 791 count = int(metadata.get("rsm_count")) |
645 except (KeyError, ValueError): | 792 except (KeyError, ValueError): |
646 # XXX: if we don't have index or count, we can't know if we | 793 # XXX: if we don't have index or count, we can't know if we |
647 # are on the last page or not | 794 # are on the last page or not |
648 pass | 795 pass |
649 else: | 796 else: |
650 # if we have index, we don't show the after link | 797 # if we have index, we don't show the after link |
651 # on the last page | 798 # on the last page |
652 if index + len(items) >= count: | 799 if index + len(items) >= count: |
653 setattr(self, key, '') | 800 setattr(self, key, "") |
654 continue | 801 continue |
655 try: | 802 try: |
656 query_data['after'] = metadata['rsm_last'].encode('utf-8') | 803 query_data["after"] = metadata["rsm_last"].encode("utf-8") |
657 except KeyError: | 804 except KeyError: |
658 pass | 805 pass |
659 | 806 |
660 if request.display_single: | 807 if request.display_single: |
661 query_data['max'] = 1 | 808 query_data["max"] = 1 |
662 | 809 |
663 link = "{}?{}".format(base_url, _urlencode(query_data)) | 810 link = "{}?{}".format(base_url, _urlencode(query_data)) |
664 setattr(self, key, BlogLink(link, key, key.replace('_', ' '))) | 811 setattr(self, key, BlogLink(link, key, key.replace("_", " "))) |
665 | 812 |
666 | 813 |
class BlogImage(object):
    """Simple value object holding an image URL and its alternative text."""

    def __init__(self, url_, alt):
        # alternative text first, URL second: order of assignment is irrelevant
        self.alt = alt
        self.url = url_
672 | 818 |
673 | 819 |
class BlogLink(object):
    """Simple value object for a navigation link: target URL, CSS style, label."""

    def __init__(self, url_, style, text):
        self.text = text
        self.style = style
        self.url = url_
680 | 825 |
681 | 826 |
682 class BlogMessage(object): | 827 class BlogMessage(object): |
683 | |
684 def __init__(self, request, base_url, entry, comments=None, comments_count=0): | 828 def __init__(self, request, base_url, entry, comments=None, comments_count=0): |
685 """ | 829 """ |
686 | 830 |
687 @param request: HTTP request | 831 @param request: HTTP request |
688 @param base_url (unicode): the base URL | 832 @param base_url (unicode): the base URL |
690 @param comments(list[dict]): list of microblog data | 834 @param comments(list[dict]): list of microblog data |
691 @param comments_count (int): total number of comments | 835 @param comments_count (int): total number of comments |
692 """ | 836 """ |
693 if comments is None: | 837 if comments is None: |
694 comments = [] | 838 comments = [] |
695 timestamp = float(entry.get('published', 0)) | 839 timestamp = float(entry.get("published", 0)) |
696 | 840 |
697 # FIXME: for now we assume that the comments' depth is only 1 | 841 # FIXME: for now we assume that the comments' depth is only 1 |
698 is_comment = not entry.get('comments', False) | 842 is_comment = not entry.get("comments", False) |
699 | 843 |
700 self.date = datetime.fromtimestamp(timestamp) | 844 self.date = datetime.fromtimestamp(timestamp) |
701 self.type = "comment" if is_comment else "main_item" | 845 self.type = "comment" if is_comment else "main_item" |
702 self.style = 'mblog_comment' if is_comment else '' | 846 self.style = "mblog_comment" if is_comment else "" |
703 self.content = self.getText(entry, 'content') | 847 self.content = self.getText(entry, "content") |
704 | 848 |
705 if is_comment: | 849 if is_comment: |
706 self.author = (_(u"from {}").format(entry['author'])) | 850 self.author = _(u"from {}").format(entry["author"]) |
707 else: | 851 else: |
708 self.author = ' ' | 852 self.author = " " |
709 self.url = "{}/{}".format(base_url, _quote(entry['id'])) | 853 self.url = "{}/{}".format(base_url, _quote(entry["id"])) |
710 query_data = getDefaultQueryData(request) | 854 query_data = getDefaultQueryData(request) |
711 if query_data: | 855 if query_data: |
712 self.url += '?{}'.format(_urlencode(query_data)) | 856 self.url += "?{}".format(_urlencode(query_data)) |
713 self.title = self.getText(entry, 'title') | 857 self.title = self.getText(entry, "title") |
714 self.tags = [sanitizeHtml(tag) for tag in data_format.dict2iter('tag', entry)] | 858 self.tags = [sanitizeHtml(tag) for tag in data_format.dict2iter("tag", entry)] |
715 | 859 |
716 count_text = lambda count: D_(u'comments') if count > 1 else D_(u'comment') | 860 count_text = lambda count: D_(u"comments") if count > 1 else D_(u"comment") |
717 | 861 |
718 self.comments_text = u"{} {}".format(comments_count, count_text(comments_count)) | 862 self.comments_text = u"{} {}".format( |
863 comments_count, count_text(comments_count) | |
864 ) | |
719 | 865 |
720 delta = comments_count - len(comments) | 866 delta = comments_count - len(comments) |
721 if request.display_single and delta > 0: | 867 if request.display_single and delta > 0: |
722 prev_url = "{}?{}".format(self.url, _urlencode({'comments_max': comments_count})) | 868 prev_url = "{}?{}".format( |
869 self.url, _urlencode({"comments_max": comments_count}) | |
870 ) | |
723 prev_text = D_(u"show {count} previous {comments}").format( | 871 prev_text = D_(u"show {count} previous {comments}").format( |
724 count = delta, comments = count_text(delta)) | 872 count=delta, comments=count_text(delta) |
873 ) | |
725 self.all_comments_link = BlogLink(prev_url, "comments_link", prev_text) | 874 self.all_comments_link = BlogLink(prev_url, "comments_link", prev_text) |
726 | 875 |
727 if comments: | 876 if comments: |
728 self.comments = [BlogMessage(request, base_url, comment) for comment in comments] | 877 self.comments = [ |
878 BlogMessage(request, base_url, comment) for comment in comments | |
879 ] | |
729 | 880 |
730 def getText(self, entry, key): | 881 def getText(self, entry, key): |
731 try: | 882 try: |
732 xhtml = entry['{}_xhtml'.format(key)] | 883 xhtml = entry["{}_xhtml".format(key)] |
733 except KeyError: | 884 except KeyError: |
734 try: | 885 try: |
735 processor = addURLToText if key.startswith('content') else sanitizeHtml | 886 processor = addURLToText if key.startswith("content") else sanitizeHtml |
736 return convertNewLinesToXHTML(processor(entry[key])) | 887 return convertNewLinesToXHTML(processor(entry[key])) |
737 except KeyError: | 888 except KeyError: |
738 return None | 889 return None |
739 else: | 890 else: |
740 # FIXME: empty <div /> elements provoke rendering issue | 891 # FIXME: empty <div /> elements provoke rendering issue |