Mercurial > libervia-web
comparison libervia/pages/blog/view/page_meta.py @ 1509:106bae41f5c8
massive refactoring from camelCase -> snake_case. See backend commit log for more details
author | Goffi <goffi@goffi.org> |
---|---|
date | Sat, 08 Apr 2023 13:44:11 +0200 |
parents | 1702b8c821c4 |
children |
comparison
equal
deleted
inserted
replaced
1508:ec3ad9abf9f9 | 1509:106bae41f5c8 |
---|---|
28 | 28 |
29 def microblog_uri(self, uri_data): | 29 def microblog_uri(self, uri_data): |
30 args = [uri_data['path'], uri_data['node']] | 30 args = [uri_data['path'], uri_data['node']] |
31 if 'item' in uri_data: | 31 if 'item' in uri_data: |
32 args.extend(['id', uri_data['item']]) | 32 args.extend(['id', uri_data['item']]) |
33 return self.getURL(*args) | 33 return self.get_url(*args) |
34 | 34 |
35 def parse_url(self, request): | 35 def parse_url(self, request): |
36 """URL is /[service]/[node]/[filter_keyword]/[item]|[other] | 36 """URL is /[service]/[node]/[filter_keyword]/[item]|[other] |
37 | 37 |
38 if [node] is '@', default namespace is used | 38 if [node] is '@', default namespace is used |
39 if a value is unset, default one will be used | 39 if a value is unset, default one will be used |
40 keyword can be one of: | 40 keyword can be one of: |
41 id: next value is an item id | 41 id: next value is an item id |
42 tag: next value is a blog tag | 42 tag: next value is a blog tag |
43 """ | 43 """ |
44 data = self.getRData(request) | 44 data = self.get_r_data(request) |
45 | 45 |
46 try: | 46 try: |
47 service = self.nextPath(request) | 47 service = self.next_path(request) |
48 except IndexError: | 48 except IndexError: |
49 data['service'] = '' | 49 data['service'] = '' |
50 else: | 50 else: |
51 try: | 51 try: |
52 data["service"] = jid.JID(service) | 52 data["service"] = jid.JID(service) |
53 except Exception: | 53 except Exception: |
54 log.warning(_("bad service entered: {}").format(service)) | 54 log.warning(_("bad service entered: {}").format(service)) |
55 self.pageError(request, C.HTTP_BAD_REQUEST) | 55 self.page_error(request, C.HTTP_BAD_REQUEST) |
56 | 56 |
57 try: | 57 try: |
58 node = self.nextPath(request) | 58 node = self.next_path(request) |
59 except IndexError: | 59 except IndexError: |
60 node = '@' | 60 node = '@' |
61 data['node'] = '' if node == '@' else node | 61 data['node'] = '' if node == '@' else node |
62 | 62 |
63 try: | 63 try: |
64 filter_kw = data['filter_keyword'] = self.nextPath(request) | 64 filter_kw = data['filter_keyword'] = self.next_path(request) |
65 except IndexError: | 65 except IndexError: |
66 filter_kw = '@' | 66 filter_kw = '@' |
67 else: | 67 else: |
68 if filter_kw == '@': | 68 if filter_kw == '@': |
69 # No filter, this is used when a subpage is needed, notably Atom feed | 69 # No filter, this is used when a subpage is needed, notably Atom feed |
70 pass | 70 pass |
71 elif filter_kw == 'id': | 71 elif filter_kw == 'id': |
72 try: | 72 try: |
73 data['item'] = self.nextPath(request) | 73 data['item'] = self.next_path(request) |
74 except IndexError: | 74 except IndexError: |
75 self.pageError(request, C.HTTP_BAD_REQUEST) | 75 self.page_error(request, C.HTTP_BAD_REQUEST) |
76 # we get one more argument in case text has been added to have a nice URL | 76 # we get one more argument in case text has been added to have a nice URL |
77 try: | 77 try: |
78 self.nextPath(request) | 78 self.next_path(request) |
79 except IndexError: | 79 except IndexError: |
80 pass | 80 pass |
81 elif filter_kw == 'tag': | 81 elif filter_kw == 'tag': |
82 try: | 82 try: |
83 data['tag'] = self.nextPath(request) | 83 data['tag'] = self.next_path(request) |
84 except IndexError: | 84 except IndexError: |
85 self.pageError(request, C.HTTP_BAD_REQUEST) | 85 self.page_error(request, C.HTTP_BAD_REQUEST) |
86 else: | 86 else: |
87 # invalid filter keyword | 87 # invalid filter keyword |
88 log.warning(_("invalid filter keyword: {filter_kw}").format( | 88 log.warning(_("invalid filter keyword: {filter_kw}").format( |
89 filter_kw=filter_kw)) | 89 filter_kw=filter_kw)) |
90 self.pageError(request, C.HTTP_BAD_REQUEST) | 90 self.page_error(request, C.HTTP_BAD_REQUEST) |
91 | 91 |
92 # if URL is parsed here, we'll have atom.xml available and we need to | 92 # if URL is parsed here, we'll have atom.xml available and we need to |
93 # add the link to the page | 93 # add the link to the page |
94 atom_url = self.getURLByPath( | 94 atom_url = self.get_url_by_path( |
95 SubPage('blog_view'), | 95 SubPage('blog_view'), |
96 service, | 96 service, |
97 node, | 97 node, |
98 filter_kw, | 98 filter_kw, |
99 SubPage('blog_feed_atom'), | 99 SubPage('blog_feed_atom'), |
105 "rel": "alternate", | 105 "rel": "alternate", |
106 "title": "{service}'s blog".format(service=service)}) | 106 "title": "{service}'s blog".format(service=service)}) |
107 | 107 |
108 | 108 |
109 def add_breadcrumb(self, request, breadcrumbs): | 109 def add_breadcrumb(self, request, breadcrumbs): |
110 data = self.getRData(request) | 110 data = self.get_r_data(request) |
111 breadcrumbs.append({ | 111 breadcrumbs.append({ |
112 "label": D_("Feed"), | 112 "label": D_("Feed"), |
113 "url": self.getURL(data["service"].full(), data.get("node", "@")) | 113 "url": self.get_url(data["service"].full(), data.get("node", "@")) |
114 }) | 114 }) |
115 if "item" in data: | 115 if "item" in data: |
116 breadcrumbs.append({ | 116 breadcrumbs.append({ |
117 "label": D_("Post"), | 117 "label": D_("Post"), |
118 }) | 118 }) |
119 | 119 |
120 | 120 |
121 async def appendComments( | 121 async def append_comments( |
122 self, | 122 self, |
123 request: server.Request, | 123 request: server.Request, |
124 blog_items: dict, | 124 blog_items: dict, |
125 profile: str, | 125 profile: str, |
126 _seen: Optional[set] = None | 126 _seen: Optional[set] = None |
131 @param profile: Libervia profile | 131 @param profile: Libervia profile |
132 @param _seen: used to avoid infinite recursion. For internal use only | 132 @param _seen: used to avoid infinite recursion. For internal use only |
133 """ | 133 """ |
134 if _seen is None: | 134 if _seen is None: |
135 _seen = set() | 135 _seen = set() |
136 await self.fillMissingIdentities( | 136 await self.fill_missing_identities( |
137 request, [i['author_jid'] for i in blog_items['items']]) | 137 request, [i['author_jid'] for i in blog_items['items']]) |
138 extra: Dict[str, Any] = {C.KEY_ORDER_BY: C.ORDER_BY_CREATION} | 138 extra: Dict[str, Any] = {C.KEY_ORDER_BY: C.ORDER_BY_CREATION} |
139 if not self.useCache(request): | 139 if not self.use_cache(request): |
140 extra[C.KEY_USE_CACHE] = False | 140 extra[C.KEY_USE_CACHE] = False |
141 for blog_item in blog_items['items']: | 141 for blog_item in blog_items['items']: |
142 for comment_data in blog_item['comments']: | 142 for comment_data in blog_item['comments']: |
143 service = comment_data['service'] | 143 service = comment_data['service'] |
144 node = comment_data['node'] | 144 node = comment_data['node'] |
151 comment_data["items"] = [] | 151 comment_data["items"] = [] |
152 continue | 152 continue |
153 else: | 153 else: |
154 _seen.add(service_node) | 154 _seen.add(service_node) |
155 try: | 155 try: |
156 comments_data = await self.host.bridgeCall('mbGet', | 156 comments_data = await self.host.bridge_call('mb_get', |
157 service, | 157 service, |
158 node, | 158 node, |
159 C.NO_LIMIT, | 159 C.NO_LIMIT, |
160 [], | 160 [], |
161 data_format.serialise( | 161 data_format.serialise( |
175 if comments is None: | 175 if comments is None: |
176 log.error(f"Comments should not be None: {comment_data}") | 176 log.error(f"Comments should not be None: {comment_data}") |
177 comment_data["items"] = [] | 177 comment_data["items"] = [] |
178 continue | 178 continue |
179 comment_data['items'] = comments['items'] | 179 comment_data['items'] = comments['items'] |
180 await appendComments(self, request, comments, profile, _seen=_seen) | 180 await append_comments(self, request, comments, profile, _seen=_seen) |
181 | 181 |
182 async def getBlogItems( | 182 async def get_blog_items( |
183 self, | 183 self, |
184 request: server.Request, | 184 request: server.Request, |
185 service: jid.JID, | 185 service: jid.JID, |
186 node: str, | 186 node: str, |
187 item_id, | 187 item_id, |
191 try: | 191 try: |
192 if item_id: | 192 if item_id: |
193 items_id = [item_id] | 193 items_id = [item_id] |
194 else: | 194 else: |
195 items_id = [] | 195 items_id = [] |
196 if not self.useCache(request): | 196 if not self.use_cache(request): |
197 extra[C.KEY_USE_CACHE] = False | 197 extra[C.KEY_USE_CACHE] = False |
198 blog_data = await self.host.bridgeCall('mbGet', | 198 blog_data = await self.host.bridge_call('mb_get', |
199 service.userhost(), | 199 service.userhost(), |
200 node, | 200 node, |
201 C.NO_LIMIT, | 201 C.NO_LIMIT, |
202 items_id, | 202 items_id, |
203 data_format.serialise(extra), | 203 data_format.serialise(extra), |
204 profile) | 204 profile) |
205 except Exception as e: | 205 except Exception as e: |
206 # FIXME: need a better way to test errors in bridge errback | 206 # FIXME: need a better way to test errors in bridge errback |
207 if "forbidden" in str(e): | 207 if "forbidden" in str(e): |
208 self.pageError(request, 401) | 208 self.page_error(request, 401) |
209 else: | 209 else: |
210 log.warning(_("can't retrieve blog for [{service}]: {msg}".format( | 210 log.warning(_("can't retrieve blog for [{service}]: {msg}".format( |
211 service = service.userhost(), msg=e))) | 211 service = service.userhost(), msg=e))) |
212 blog_data = {"items": []} | 212 blog_data = {"items": []} |
213 else: | 213 else: |
214 blog_data = data_format.deserialise(blog_data) | 214 blog_data = data_format.deserialise(blog_data) |
215 | 215 |
216 return blog_data | 216 return blog_data |
217 | 217 |
218 async def prepare_render(self, request): | 218 async def prepare_render(self, request): |
219 data = self.getRData(request) | 219 data = self.get_r_data(request) |
220 template_data = request.template_data | 220 template_data = request.template_data |
221 page_max = data.get("page_max", 10) | 221 page_max = data.get("page_max", 10) |
222 # if the comments are not explicitly hidden, we show them | 222 # if the comments are not explicitly hidden, we show them |
223 service, node, item_id, show_comments = ( | 223 service, node, item_id, show_comments = ( |
224 data.get('service', ''), | 224 data.get('service', ''), |
225 data.get('node', ''), | 225 data.get('node', ''), |
226 data.get('item'), | 226 data.get('item'), |
227 data.get('show_comments', True) | 227 data.get('show_comments', True) |
228 ) | 228 ) |
229 profile = self.getProfile(request) | 229 profile = self.get_profile(request) |
230 if profile is None: | 230 if profile is None: |
231 profile = C.SERVICE_PROFILE | 231 profile = C.SERVICE_PROFILE |
232 profile_connected = False | 232 profile_connected = False |
233 else: | 233 else: |
234 profile_connected = True | 234 profile_connected = True |
235 | 235 |
236 ## pagination/filtering parameters | 236 ## pagination/filtering parameters |
237 if item_id: | 237 if item_id: |
238 extra = {} | 238 extra = {} |
239 else: | 239 else: |
240 extra = self.getPubsubExtra(request, page_max=page_max) | 240 extra = self.get_pubsub_extra(request, page_max=page_max) |
241 tag = data.get('tag') | 241 tag = data.get('tag') |
242 if tag: | 242 if tag: |
243 extra[f'mam_filter_{C.MAM_FILTER_CATEGORY}'] = tag | 243 extra[f'mam_filter_{C.MAM_FILTER_CATEGORY}'] = tag |
244 self.handleSearch(request, extra) | 244 self.handle_search(request, extra) |
245 | 245 |
246 ## main data ## | 246 ## main data ## |
247 # we get data from backend/XMPP here | 247 # we get data from backend/XMPP here |
248 blog_items = await getBlogItems(self, request, service, node, item_id, extra, profile) | 248 blog_items = await get_blog_items(self, request, service, node, item_id, extra, profile) |
249 | 249 |
250 ## navigation ## | 250 ## navigation ## |
251 # now let's fill service, node and pagination URLs | 251 # now let's fill service, node and pagination URLs |
252 if 'service' not in template_data: | 252 if 'service' not in template_data: |
253 template_data['service'] = service | 253 template_data['service'] = service |
255 template_data['node'] = node | 255 template_data['node'] = node |
256 target_profile = template_data.get('target_profile') | 256 target_profile = template_data.get('target_profile') |
257 | 257 |
258 if blog_items: | 258 if blog_items: |
259 if item_id: | 259 if item_id: |
260 template_data["previous_page_url"] = self.getURL( | 260 template_data["previous_page_url"] = self.get_url( |
261 service.full(), | 261 service.full(), |
262 node, | 262 node, |
263 before=item_id, | 263 before=item_id, |
264 page_max=1 | 264 page_max=1 |
265 ) | 265 ) |
266 template_data["next_page_url"] = self.getURL( | 266 template_data["next_page_url"] = self.get_url( |
267 service.full(), | 267 service.full(), |
268 node, | 268 node, |
269 after=item_id, | 269 after=item_id, |
270 page_max=1 | 270 page_max=1 |
271 ) | 271 ) |
273 "last": item_id, | 273 "last": item_id, |
274 "first": item_id, | 274 "first": item_id, |
275 } | 275 } |
276 blog_items["complete"] = False | 276 blog_items["complete"] = False |
277 else: | 277 else: |
278 self.setPagination(request, blog_items) | 278 self.set_pagination(request, blog_items) |
279 else: | 279 else: |
280 if item_id: | 280 if item_id: |
281 # if item id has been specified in URL and it's not found, | 281 # if item id has been specified in URL and it's not found, |
282 # we must return an error | 282 # we must return an error |
283 self.pageError(request, C.HTTP_NOT_FOUND) | 283 self.page_error(request, C.HTTP_NOT_FOUND) |
284 | 284 |
285 ## identities ## | 285 ## identities ## |
286 # identities are used to show nice nickname or avatars | 286 # identities are used to show nice nickname or avatars |
287 await self.fillMissingIdentities(request, [i['author_jid'] for i in blog_items['items']]) | 287 await self.fill_missing_identities(request, [i['author_jid'] for i in blog_items['items']]) |
288 | 288 |
289 ## Comments ## | 289 ## Comments ## |
290 # if comments are requested, we need to take them | 290 # if comments are requested, we need to take them |
291 if show_comments: | 291 if show_comments: |
292 await appendComments(self, request, blog_items, profile) | 292 await append_comments(self, request, blog_items, profile) |
293 | 293 |
294 ## URLs ## | 294 ## URLs ## |
295 # We will fill items_http_uri and tags_http_uri in template_data with suitable urls | 295 # We will fill items_http_uri and tags_http_uri in template_data with suitable urls |
296 # if we know the profile, we use it instead of service + blog (nicer url) | 296 # if we know the profile, we use it instead of service + blog (nicer url) |
297 if target_profile is None: | 297 if target_profile is None: |
298 blog_base_url_item = self.getPageByName('blog_view').getURL(service.full(), node or '@', 'id') | 298 blog_base_url_item = self.get_page_by_name('blog_view').get_url(service.full(), node or '@', 'id') |
299 blog_base_url_tag = self.getPageByName('blog_view').getURL(service.full(), node or '@', 'tag') | 299 blog_base_url_tag = self.get_page_by_name('blog_view').get_url(service.full(), node or '@', 'tag') |
300 else: | 300 else: |
301 blog_base_url_item = self.getURLByNames([('user', [target_profile]), ('user_blog', ['id'])]) | 301 blog_base_url_item = self.get_url_by_names([('user', [target_profile]), ('user_blog', ['id'])]) |
302 blog_base_url_tag = self.getURLByNames([('user', [target_profile]), ('user_blog', ['tag'])]) | 302 blog_base_url_tag = self.get_url_by_names([('user', [target_profile]), ('user_blog', ['tag'])]) |
303 # we also set the background image if specified by user | 303 # we also set the background image if specified by user |
304 bg_img = await self.host.bridgeCall('asyncGetParamA', 'Background', 'Blog page', 'value', -1, template_data['target_profile']) | 304 bg_img = await self.host.bridge_call('param_get_a_async', 'Background', 'Blog page', 'value', -1, template_data['target_profile']) |
305 if bg_img: | 305 if bg_img: |
306 template_data['dynamic_style'] = safe(""" | 306 template_data['dynamic_style'] = safe(""" |
307 :root { | 307 :root { |
308 --bg-img: url("%s"); | 308 --bg-img: url("%s"); |
309 } | 309 } |
325 # to make it more human readable | 325 # to make it more human readable |
326 # we do it only if there is no "-", as a "-" probably means that | 326 # we do it only if there is no "-", as a "-" probably means that |
327 # item's id is already user friendly. | 327 # item's id is already user friendly. |
328 # TODO: to be removed, this is only kept for a transition period until | 328 # TODO: to be removed, this is only kept for a transition period until |
329 # user friendly item IDs are more common. | 329 # user friendly item IDs are more common. |
330 text = regex.urlFriendlyText(item.get('title', item['content'])) | 330 text = regex.url_friendly_text(item.get('title', item['content'])) |
331 if text: | 331 if text: |
332 blog_url = blog_canonical_url + '/' + text | 332 blog_url = blog_canonical_url + '/' + text |
333 else: | 333 else: |
334 blog_url = blog_canonical_url | 334 blog_url = blog_canonical_url |
335 else: | 335 else: |
336 blog_url = blog_canonical_url | 336 blog_url = blog_canonical_url |
337 | 337 |
338 items_http_uri[item['id']] = self.host.getExtBaseURL(request, blog_url) | 338 items_http_uri[item['id']] = self.host.get_ext_base_url(request, blog_url) |
339 for tag in item['tags']: | 339 for tag in item['tags']: |
340 if tag not in tags_http_uri: | 340 if tag not in tags_http_uri: |
341 tag_url = '/'.join([blog_base_url_tag, utils.quote(tag)]) | 341 tag_url = '/'.join([blog_base_url_tag, utils.quote(tag)]) |
342 tags_http_uri[tag] = self.host.getExtBaseURL(request, tag_url) | 342 tags_http_uri[tag] = self.host.get_ext_base_url(request, tag_url) |
343 | 343 |
344 # if True, page should display a comment box | 344 # if True, page should display a comment box |
345 template_data['allow_commenting'] = data.get('allow_commenting', profile_connected) | 345 template_data['allow_commenting'] = data.get('allow_commenting', profile_connected) |
346 | 346 |
347 # last but not least, we add a xmpp: link to the node | 347 # last but not least, we add a xmpp: link to the node |
348 uri_args = {'path': service.full()} | 348 uri_args = {'path': service.full()} |
349 if node: | 349 if node: |
350 uri_args['node'] = node | 350 uri_args['node'] = node |
351 if item_id: | 351 if item_id: |
352 uri_args['item'] = item_id | 352 uri_args['item'] = item_id |
353 template_data['xmpp_uri'] = uri.buildXMPPUri( | 353 template_data['xmpp_uri'] = uri.build_xmpp_uri( |
354 'pubsub', subtype='microblog', **uri_args | 354 'pubsub', subtype='microblog', **uri_args |
355 ) | 355 ) |
356 | 356 |
357 | 357 |
358 async def on_data_post(self, request): | 358 async def on_data_post(self, request): |
359 profile = self.getProfile(request) | 359 profile = self.get_profile(request) |
360 if profile is None: | 360 if profile is None: |
361 self.pageError(request, C.HTTP_FORBIDDEN) | 361 self.page_error(request, C.HTTP_FORBIDDEN) |
362 type_ = self.getPostedData(request, 'type') | 362 type_ = self.get_posted_data(request, 'type') |
363 if type_ == 'comment': | 363 if type_ == 'comment': |
364 service, node, body = self.getPostedData(request, ('service', 'node', 'body')) | 364 service, node, body = self.get_posted_data(request, ('service', 'node', 'body')) |
365 | 365 |
366 if not body: | 366 if not body: |
367 self.pageError(request, C.HTTP_BAD_REQUEST) | 367 self.page_error(request, C.HTTP_BAD_REQUEST) |
368 comment_data = {"content_rich": body} | 368 comment_data = {"content_rich": body} |
369 try: | 369 try: |
370 await self.host.bridgeCall('mbSend', | 370 await self.host.bridge_call('mb_send', |
371 service, | 371 service, |
372 node, | 372 node, |
373 data_format.serialise(comment_data), | 373 data_format.serialise(comment_data), |
374 profile) | 374 profile) |
375 except Exception as e: | 375 except Exception as e: |
376 if "forbidden" in str(e): | 376 if "forbidden" in str(e): |
377 self.pageError(request, 401) | 377 self.page_error(request, 401) |
378 else: | 378 else: |
379 raise e | 379 raise e |
380 else: | 380 else: |
381 log.warning(_("Unhandled data type: {}").format(type_)) | 381 log.warning(_("Unhandled data type: {}").format(type_)) |