comparison sat/plugins/plugin_comp_file_sharing.py @ 4037:524856bd7b19

massive refactoring to switch from camelCase to snake_case: historically, Libervia (formerly SàT) used camelCase, as allowed by PEP8 when working with pre-PEP8 code, in order to match the coding style of Twisted. However, snake_case is more readable and it is better to follow PEP8 best practices, so it has been decided to move to full snake_case. Because Libervia has a huge codebase, the partial migration had resulted in an ugly mix of camelCase and snake_case. To fix that, this patch does a big refactoring, renaming every function and method (including the bridge) that does not come from Twisted or Wokkel to use snake_case throughout; a minimal sketch of this kind of rename follows the changeset header below. This is a massive change, and may result in some bugs.
author Goffi <goffi@goffi.org>
date Sat, 08 Apr 2023 13:54:42 +0200
parents 412b99c29d83
children
comparison
4036:c4464d7ae97b 4037:524856bd7b19
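For context, here is a minimal sketch of the kind of identifier conversion this patch applies, assuming a purely mechanical regex-based rename; the to_snake_case helper below is hypothetical and not part of the changeset. Many renames visible in the comparison are indeed mechanical (e.g. getDispositionType -> get_disposition_type, registerReceivedFile -> register_received_file), but some are not (e.g. getConfig -> config_get), so a script like this could only serve as a starting point for the manual refactoring.

import re

def to_snake_case(name: str) -> str:
    """Convert a camelCase identifier to snake_case (hypothetical helper)."""
    # insert an underscore before an uppercase letter that follows a
    # lowercase letter or a digit: "getQuota" -> "get_Quota"
    s = re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', name)
    # split acronym boundaries: "HTTPUpload" -> "HTTP_Upload"
    s = re.sub(r'(?<=[A-Z])([A-Z][a-z])', r'_\1', s)
    return s.lower()

if __name__ == "__main__":
    # a few names taken from this very diff
    for old in ("getDispositionType", "registerReceivedFile", "fileGetUsedSpace"):
        print(f"{old} -> {to_snake_case(old)}")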
111 f'"{brief}" ({code}): {details}' 111 f'"{brief}" ({code}): {details}'
112 ) 112 )
113 113
114 return resource.ErrorPage(code, brief, details).render(request) 114 return resource.ErrorPage(code, brief, details).render(request)
115 115
116 def getDispositionType(self, media_type, media_subtype): 116 def get_disposition_type(self, media_type, media_subtype):
117 if media_type in ('image', 'video'): 117 if media_type in ('image', 'video'):
118 return 'inline' 118 return 'inline'
119 elif media_type == 'application' and media_subtype == 'pdf': 119 elif media_type == 'application' and media_subtype == 'pdf':
120 return 'inline' 120 return 'inline'
121 else: 121 else:
134 request.setHeader( 134 request.setHeader(
135 "Access-Control-Expose-Headers", 135 "Access-Control-Expose-Headers",
136 "Date, Content-Length, Content-Range") 136 "Date, Content-Length, Content-Range")
137 return super().render(request) 137 return super().render(request)
138 138
139 def render_OPTIONS(self, request): 139 def render_options(self, request):
140 request.setResponseCode(http.OK) 140 request.setResponseCode(http.OK)
141 return b"" 141 return b""
142 142
143 def render_GET(self, request): 143 def render_GET(self, request):
144 try: 144 try:
145 request.upload_data 145 request.upload_data
146 except exceptions.DataError: 146 except exceptions.DataError:
147 return self.errorPage(request, http.NOT_FOUND) 147 return self.errorPage(request, http.NOT_FOUND)
148 148
149 defer.ensureDeferred(self.renderGet(request)) 149 defer.ensureDeferred(self.render_get(request))
150 return server.NOT_DONE_YET 150 return server.NOT_DONE_YET
151 151
152 async def renderGet(self, request): 152 async def render_get(self, request):
153 try: 153 try:
154 upload_id, filename = request.upload_data 154 upload_id, filename = request.upload_data
155 except exceptions.DataError: 155 except exceptions.DataError:
156 request.write(self.errorPage(request, http.FORBIDDEN)) 156 request.write(self.errorPage(request, http.FORBIDDEN))
157 request.finish() 157 request.finish()
158 return 158 return
159 found_files = await request.file_sharing.host.memory.getFiles( 159 found_files = await request.file_sharing.host.memory.get_files(
160 client=None, peer_jid=None, perms_to_check=None, public_id=upload_id) 160 client=None, peer_jid=None, perms_to_check=None, public_id=upload_id)
161 if not found_files: 161 if not found_files:
162 request.write(self.errorPage(request, http.NOT_FOUND)) 162 request.write(self.errorPage(request, http.NOT_FOUND))
163 request.finish() 163 request.finish()
164 return 164 return
168 found_file = found_files[0] 168 found_file = found_files[0]
169 file_path = request.file_sharing.files_path/found_file['file_hash'] 169 file_path = request.file_sharing.files_path/found_file['file_hash']
170 file_res = static.File(file_path) 170 file_res = static.File(file_path)
171 file_res.type = f'{found_file["media_type"]}/{found_file["media_subtype"]}' 171 file_res.type = f'{found_file["media_type"]}/{found_file["media_subtype"]}'
172 file_res.encoding = file_res.contentEncodings.get(Path(found_file['name']).suffix) 172 file_res.encoding = file_res.contentEncodings.get(Path(found_file['name']).suffix)
173 disp_type = self.getDispositionType( 173 disp_type = self.get_disposition_type(
174 found_file['media_type'], found_file['media_subtype']) 174 found_file['media_type'], found_file['media_subtype'])
175 # the URL is percent encoded, and not all browsers/tools unquote the file name, 175 # the URL is percent encoded, and not all browsers/tools unquote the file name,
176 # thus we add a content disposition header 176 # thus we add a content disposition header
177 request.setHeader( 177 request.setHeader(
178 'Content-Disposition', 178 'Content-Disposition',
188 # HEAD returns the result directly (while GET uses a producer) 188 # HEAD returns the result directly (while GET uses a producer)
189 request.write(ret) 189 request.write(ret)
190 request.finish() 190 request.finish()
191 191
192 def render_PUT(self, request): 192 def render_PUT(self, request):
193 defer.ensureDeferred(self.renderPut(request)) 193 defer.ensureDeferred(self.render_put(request))
194 return server.NOT_DONE_YET 194 return server.NOT_DONE_YET
195 195
196 async def renderPut(self, request): 196 async def render_put(self, request):
197 try: 197 try:
198 client, upload_request = request.upload_request_data 198 client, upload_request = request.upload_request_data
199 upload_id, filename = request.upload_data 199 upload_id, filename = request.upload_data
200 except AttributeError: 200 except AttributeError:
201 request.write(self.errorPage(request, http.BAD_REQUEST)) 201 request.write(self.errorPage(request, http.BAD_REQUEST))
226 "mime_type": upload_request.content_type, 226 "mime_type": upload_request.content_type,
227 "size": upload_request.size, 227 "size": upload_request.size,
228 "path": path 228 "path": path
229 } 229 }
230 230
231 await request.file_sharing.registerReceivedFile( 231 await request.file_sharing.register_received_file(
232 client, upload_request.from_, file_data, tmp_file_path, 232 client, upload_request.from_, file_data, tmp_file_path,
233 public_id=public_id, 233 public_id=public_id,
234 ) 234 )
235 235
236 request.setResponseCode(http.CREATED) 236 request.setResponseCode(http.CREATED)
271 271
272 @property 272 @property
273 def file_tmp_dir(self): 273 def file_tmp_dir(self):
274 return self.channel.site.file_tmp_dir 274 return self.channel.site.file_tmp_dir
275 275
276 def refuseRequest(self): 276 def refuse_request(self):
277 if self.content is not None: 277 if self.content is not None:
278 self.content.close() 278 self.content.close()
279 self.content = open(os.devnull, 'w+b') 279 self.content = open(os.devnull, 'w+b')
280 self.channel._respondToBadRequestAndDisconnect() 280 self.channel._respondToBadRequestAndDisconnect()
281 281
285 # we buffer the file in component's TMP_BUFFER_DIR, so we just have to rename it at the end 285 # we buffer the file in component's TMP_BUFFER_DIR, so we just have to rename it at the end
286 try: 286 try:
287 upload_id, filename = self.upload_data 287 upload_id, filename = self.upload_data
288 except exceptions.DataError as e: 288 except exceptions.DataError as e:
289 log.warning(f"Invalid PUT request, we stop here: {e}") 289 log.warning(f"Invalid PUT request, we stop here: {e}")
290 return self.refuseRequest() 290 return self.refuse_request()
291 try: 291 try:
292 client, upload_request, timer = self.file_sharing.expected_uploads.pop(upload_id) 292 client, upload_request, timer = self.file_sharing.expected_uploads.pop(upload_id)
293 except KeyError: 293 except KeyError:
294 log.warning(f"unknown (expired?) upload ID received for a PUT: {upload_id!r}") 294 log.warning(f"unknown (expired?) upload ID received for a PUT: {upload_id!r}")
295 return self.refuseRequest() 295 return self.refuse_request()
296 296
297 if not timer.active: 297 if not timer.active:
298 log.warning(f"upload id {upload_id!r} used for a PUT, but it is expired") 298 log.warning(f"upload id {upload_id!r} used for a PUT, but it is expired")
299 return self.refuseRequest() 299 return self.refuse_request()
300 300
301 timer.cancel() 301 timer.cancel()
302 302
303 if upload_request.filename != filename: 303 if upload_request.filename != filename:
304 log.warning( 304 log.warning(
305 f"invalid filename for PUT (upload id: {upload_id!r}, URL: {self.channel._path.decode()}). Original " 305 f"invalid filename for PUT (upload id: {upload_id!r}, URL: {self.channel._path.decode()}). Original "
306 f"{upload_request.filename!r} doesn't match {filename!r}" 306 f"{upload_request.filename!r} doesn't match {filename!r}"
307 ) 307 )
308 return self.refuseRequest() 308 return self.refuse_request()
309 309
310 self.upload_request_data = (client, upload_request) 310 self.upload_request_data = (client, upload_request)
311 311
312 file_tmp_path = files_utils.get_unique_name( 312 file_tmp_path = files_utils.get_unique_name(
313 self.file_tmp_dir/upload_id) 313 self.file_tmp_dir/upload_id)
353 self._f = self.host.plugins["FILE"] 353 self._f = self.host.plugins["FILE"]
354 self._jf = self.host.plugins["XEP-0234"] 354 self._jf = self.host.plugins["XEP-0234"]
355 self._h = self.host.plugins["XEP-0300"] 355 self._h = self.host.plugins["XEP-0300"]
356 self._t = self.host.plugins["XEP-0264"] 356 self._t = self.host.plugins["XEP-0264"]
357 self._hu = self.host.plugins["XEP-0363"] 357 self._hu = self.host.plugins["XEP-0363"]
358 self._hu.registerHandler(self._on_http_upload) 358 self._hu.register_handler(self._on_http_upload)
359 self.host.trigger.add("FILE_getDestDir", self._getDestDirTrigger) 359 self.host.trigger.add("FILE_getDestDir", self._get_dest_dir_trigger)
360 self.host.trigger.add( 360 self.host.trigger.add(
361 "XEP-0234_fileSendingRequest", self._fileSendingRequestTrigger, priority=1000 361 "XEP-0234_fileSendingRequest", self._file_sending_request_trigger, priority=1000
362 ) 362 )
363 self.host.trigger.add("XEP-0234_buildFileElement", self._addFileMetadataElts) 363 self.host.trigger.add("XEP-0234_buildFileElement", self._add_file_metadata_elts)
364 self.host.trigger.add("XEP-0234_parseFileElement", self._getFileMetadataElts) 364 self.host.trigger.add("XEP-0234_parseFileElement", self._get_file_metadata_elts)
365 self.host.trigger.add("XEP-0329_compGetFilesFromNode", self._addFileMetadata) 365 self.host.trigger.add("XEP-0329_compGetFilesFromNode", self._add_file_metadata)
366 self.host.trigger.add( 366 self.host.trigger.add(
367 "XEP-0329_compGetFilesFromNode_build_directory", 367 "XEP-0329_compGetFilesFromNode_build_directory",
368 self._addDirectoryMetadataElts) 368 self._add_directory_metadata_elts)
369 self.host.trigger.add( 369 self.host.trigger.add(
370 "XEP-0329_parseResult_directory", 370 "XEP-0329_parseResult_directory",
371 self._getDirectoryMetadataElts) 371 self._get_directory_metadata_elts)
372 self.files_path = self.host.get_local_path(None, C.FILES_DIR) 372 self.files_path = self.host.get_local_path(None, C.FILES_DIR)
373 self.http_port = int(self.host.memory.getConfig( 373 self.http_port = int(self.host.memory.config_get(
374 'component file-sharing', 'http_upload_port', 8888)) 374 'component file-sharing', 'http_upload_port', 8888))
375 connection_type = self.host.memory.getConfig( 375 connection_type = self.host.memory.config_get(
376 'component file-sharing', 'http_upload_connection_type', 'https') 376 'component file-sharing', 'http_upload_connection_type', 'https')
377 if connection_type not in ('http', 'https'): 377 if connection_type not in ('http', 'https'):
378 raise exceptions.ConfigError( 378 raise exceptions.ConfigError(
379 'bad http_upload_connection_type, you must use one of "http" or "https"' 379 'bad http_upload_connection_type, you must use one of "http" or "https"'
380 ) 380 )
381 self.server = FileSharingSite(self) 381 self.server = FileSharingSite(self)
382 self.expected_uploads = {} 382 self.expected_uploads = {}
383 if connection_type == 'http': 383 if connection_type == 'http':
384 reactor.listenTCP(self.http_port, self.server) 384 reactor.listenTCP(self.http_port, self.server)
385 else: 385 else:
386 options = tls.getOptionsFromConfig( 386 options = tls.get_options_from_config(
387 self.host.memory.config, "component file-sharing") 387 self.host.memory.config, "component file-sharing")
388 tls.TLSOptionsCheck(options) 388 tls.tls_options_check(options)
389 context_factory = tls.getTLSContextFactory(options) 389 context_factory = tls.get_tls_context_factory(options)
390 reactor.listenSSL(self.http_port, self.server, context_factory) 390 reactor.listenSSL(self.http_port, self.server, context_factory)
391 391
392 def getHandler(self, client): 392 def get_handler(self, client):
393 return Comments_handler(self) 393 return Comments_handler(self)
394 394
395 def profileConnecting(self, client): 395 def profile_connecting(self, client):
396 # we activate HTTP upload 396 # we activate HTTP upload
397 client.enabled_features.add("XEP-0363") 397 client.enabled_features.add("XEP-0363")
398 398
399 self.init() 399 self.init()
400 public_base_url = self.host.memory.getConfig( 400 public_base_url = self.host.memory.config_get(
401 'component file-sharing', 'http_upload_public_facing_url') 401 'component file-sharing', 'http_upload_public_facing_url')
402 if public_base_url is None: 402 if public_base_url is None:
403 client._file_sharing_base_url = f"https://{client.host}:{self.http_port}" 403 client._file_sharing_base_url = f"https://{client.host}:{self.http_port}"
404 else: 404 else:
405 client._file_sharing_base_url = public_base_url 405 client._file_sharing_base_url = public_base_url
406 path = client.file_tmp_dir = os.path.join( 406 path = client.file_tmp_dir = os.path.join(
407 self.host.memory.getConfig("", "local_dir"), 407 self.host.memory.config_get("", "local_dir"),
408 C.FILES_TMP_DIR, 408 C.FILES_TMP_DIR,
409 regex.pathEscape(client.profile), 409 regex.path_escape(client.profile),
410 ) 410 )
411 if not os.path.exists(path): 411 if not os.path.exists(path):
412 os.makedirs(path) 412 os.makedirs(path)
413 413
414 def getQuota(self, client, entity): 414 def get_quota(self, client, entity):
415 """Return maximum size allowed for all files for entity""" 415 """Return maximum size allowed for all files for entity"""
416 quotas = self.host.memory.getConfig("component file-sharing", "quotas_json", {}) 416 quotas = self.host.memory.config_get("component file-sharing", "quotas_json", {})
417 if self.host.memory.isAdminJID(entity): 417 if self.host.memory.is_admin_jid(entity):
418 quota = quotas.get("admins") 418 quota = quotas.get("admins")
419 else: 419 else:
420 try: 420 try:
421 quota = quotas["jids"][entity.userhost()] 421 quota = quotas["jids"][entity.userhost()]
422 except KeyError: 422 except KeyError:
423 quota = quotas.get("users") 423 quota = quotas.get("users")
424 return None if quota is None else utils.parseSize(quota) 424 return None if quota is None else utils.parse_size(quota)
425 425
426 async def generate_thumbnails(self, extra: dict, image_path: Path): 426 async def generate_thumbnails(self, extra: dict, image_path: Path):
427 thumbnails = extra.setdefault(C.KEY_THUMBNAILS, []) 427 thumbnails = extra.setdefault(C.KEY_THUMBNAILS, [])
428 for max_thumb_size in self._t.SIZES: 428 for max_thumb_size in self._t.SIZES:
429 try: 429 try:
430 thumb_size, thumb_id = await self._t.generateThumbnail( 430 thumb_size, thumb_id = await self._t.generate_thumbnail(
431 image_path, 431 image_path,
432 max_thumb_size, 432 max_thumb_size,
433 #  we keep thumbnails for 6 months 433 #  we keep thumbnails for 6 months
434 60 * 60 * 24 * 31 * 6, 434 60 * 60 * 24 * 31 * 6,
435 ) 435 )
436 except Exception as e: 436 except Exception as e:
437 log.warning(_("Can't create thumbnail: {reason}").format(reason=e)) 437 log.warning(_("Can't create thumbnail: {reason}").format(reason=e))
438 break 438 break
439 thumbnails.append({"id": thumb_id, "size": thumb_size}) 439 thumbnails.append({"id": thumb_id, "size": thumb_size})
440 440
441 async def registerReceivedFile( 441 async def register_received_file(
442 self, client, peer_jid, file_data, file_path, public_id=None, extra=None): 442 self, client, peer_jid, file_data, file_path, public_id=None, extra=None):
443 """Post file reception tasks 443 """Post file reception tasks
444 444
445 once the file is received, this method creates hash/thumbnails if necessary, 445 once the file is received, this method creates hash/thumbnails if necessary,
446 moves the file to the right location, and creates a metadata entry in database 446 moves the file to the right location, and creates a metadata entry in database
458 458
459 if file_data.get("hash_algo") == HASH_ALGO: 459 if file_data.get("hash_algo") == HASH_ALGO:
460 log.debug(_("Reusing already generated hash")) 460 log.debug(_("Reusing already generated hash"))
461 file_hash = file_data["hash_hasher"].hexdigest() 461 file_hash = file_data["hash_hasher"].hexdigest()
462 else: 462 else:
463 hasher = self._h.getHasher(HASH_ALGO) 463 hasher = self._h.get_hasher(HASH_ALGO)
464 with file_path.open('rb') as f: 464 with file_path.open('rb') as f:
465 file_hash = await self._h.calculateHash(f, hasher) 465 file_hash = await self._h.calculate_hash(f, hasher)
466 final_path = self.files_path/file_hash 466 final_path = self.files_path/file_hash
467 467
468 if final_path.is_file(): 468 if final_path.is_file():
469 log.debug( 469 log.debug(
470 "file [{file_hash}] already exists, we can remove temporary one".format( 470 "file [{file_hash}] already exists, we can remove temporary one".format(
491 log.warning(_("Can't get thumbnail for {final_path}: {e}").format( 491 log.warning(_("Can't get thumbnail for {final_path}: {e}").format(
492 final_path=final_path, e=e)) 492 final_path=final_path, e=e))
493 else: 493 else:
494 await self.generate_thumbnails(extra, thumb_path) 494 await self.generate_thumbnails(extra, thumb_path)
495 495
496 await self.host.memory.setFile( 496 await self.host.memory.set_file(
497 client, 497 client,
498 name=name, 498 name=name,
499 version="", 499 version="",
500 file_hash=file_hash, 500 file_hash=file_hash,
501 hash_algo=HASH_ALGO, 501 hash_algo=HASH_ALGO,
506 public_id=public_id, 506 public_id=public_id,
507 owner=peer_jid, 507 owner=peer_jid,
508 extra=extra, 508 extra=extra,
509 ) 509 )
510 510
511 async def _getDestDirTrigger( 511 async def _get_dest_dir_trigger(
512 self, client, peer_jid, transfer_data, file_data, stream_object 512 self, client, peer_jid, transfer_data, file_data, stream_object
513 ): 513 ):
514 """This trigger accept file sending request, and store file locally""" 514 """This trigger accept file sending request, and store file locally"""
515 if not client.is_component: 515 if not client.is_component:
516 return True, None 516 return True, None
520 assert stream_object 520 assert stream_object
521 assert "stream_object" not in transfer_data 521 assert "stream_object" not in transfer_data
522 assert C.KEY_PROGRESS_ID in file_data 522 assert C.KEY_PROGRESS_ID in file_data
523 filename = file_data["name"] 523 filename = file_data["name"]
524 assert filename and not "/" in filename 524 assert filename and not "/" in filename
525 quota = self.getQuota(client, peer_jid) 525 quota = self.get_quota(client, peer_jid)
526 if quota is not None: 526 if quota is not None:
527 used_space = await self.host.memory.fileGetUsedSpace(client, peer_jid) 527 used_space = await self.host.memory.file_get_used_space(client, peer_jid)
528 528
529 if (used_space + file_data["size"]) > quota: 529 if (used_space + file_data["size"]) > quota:
530 raise error.StanzaError( 530 raise error.StanzaError(
531 "not-acceptable", 531 "not-acceptable",
532 text=OVER_QUOTA_TXT.format( 532 text=OVER_QUOTA_TXT.format(
533 quota=utils.getHumanSize(quota), 533 quota=utils.get_human_size(quota),
534 used_space=utils.getHumanSize(used_space), 534 used_space=utils.get_human_size(used_space),
535 file_size=utils.getHumanSize(file_data['size']) 535 file_size=utils.get_human_size(file_data['size'])
536 ) 536 )
537 ) 537 )
538 file_tmp_dir = self.host.get_local_path( 538 file_tmp_dir = self.host.get_local_path(
539 None, C.FILES_TMP_DIR, peer_jid.userhost(), component=True 539 None, C.FILES_TMP_DIR, peer_jid.userhost(), component=True
540 ) 540 )
541 file_tmp_path = file_data['file_path'] = files_utils.get_unique_name( 541 file_tmp_path = file_data['file_path'] = files_utils.get_unique_name(
542 file_tmp_dir/filename) 542 file_tmp_dir/filename)
543 543
544 transfer_data["finished_d"].addCallback( 544 transfer_data["finished_d"].addCallback(
545 lambda __: defer.ensureDeferred( 545 lambda __: defer.ensureDeferred(
546 self.registerReceivedFile(client, peer_jid, file_data, file_tmp_path) 546 self.register_received_file(client, peer_jid, file_data, file_tmp_path)
547 ) 547 )
548 ) 548 )
549 549
550 self._f.openFileWrite( 550 self._f.open_file_write(
551 client, file_tmp_path, transfer_data, file_data, stream_object 551 client, file_tmp_path, transfer_data, file_data, stream_object
552 ) 552 )
553 return False, True 553 return False, True
554 554
555 async def _retrieveFiles( 555 async def _retrieve_files(
556 self, client, session, content_data, content_name, file_data, file_elt 556 self, client, session, content_data, content_name, file_data, file_elt
557 ): 557 ):
558 """This method retrieve a file on request, and send if after checking permissions""" 558 """This method retrieve a file on request, and send if after checking permissions"""
559 peer_jid = session["peer_jid"] 559 peer_jid = session["peer_jid"]
560 if session['local_jid'].user: 560 if session['local_jid'].user:
561 owner = client.getOwnerFromJid(session['local_jid']) 561 owner = client.get_owner_from_jid(session['local_jid'])
562 else: 562 else:
563 owner = peer_jid 563 owner = peer_jid
564 try: 564 try:
565 found_files = await self.host.memory.getFiles( 565 found_files = await self.host.memory.get_files(
566 client, 566 client,
567 peer_jid=peer_jid, 567 peer_jid=peer_jid,
568 name=file_data.get("name"), 568 name=file_data.get("name"),
569 file_hash=file_data.get("file_hash"), 569 file_hash=file_data.get("file_hash"),
570 hash_algo=file_data.get("hash_algo"), 570 hash_algo=file_data.get("hash_algo"),
593 if found_file['type'] != C.FILE_TYPE_FILE: 593 if found_file['type'] != C.FILE_TYPE_FILE:
594 raise TypeError("a file was expected, type is {type_}".format( 594 raise TypeError("a file was expected, type is {type_}".format(
595 type_=found_file['type'])) 595 type_=found_file['type']))
596 file_hash = found_file["file_hash"] 596 file_hash = found_file["file_hash"]
597 file_path = self.files_path / file_hash 597 file_path = self.files_path / file_hash
598 file_data["hash_hasher"] = hasher = self._h.getHasher(found_file["hash_algo"]) 598 file_data["hash_hasher"] = hasher = self._h.get_hasher(found_file["hash_algo"])
599 size = file_data["size"] = found_file["size"] 599 size = file_data["size"] = found_file["size"]
600 file_data["file_hash"] = file_hash 600 file_data["file_hash"] = file_hash
601 file_data["hash_algo"] = found_file["hash_algo"] 601 file_data["hash_algo"] = found_file["hash_algo"]
602 602
603 # we complete file_elt so peer can have some details on the file 603 # we complete file_elt so peer can have some details on the file
606 file_elt.addElement("size", content=str(size)) 606 file_elt.addElement("size", content=str(size))
607 content_data["stream_object"] = stream.FileStreamObject( 607 content_data["stream_object"] = stream.FileStreamObject(
608 self.host, 608 self.host,
609 client, 609 client,
610 file_path, 610 file_path,
611 uid=self._jf.getProgressId(session, content_name), 611 uid=self._jf.get_progress_id(session, content_name),
612 size=size, 612 size=size,
613 data_cb=lambda data: hasher.update(data), 613 data_cb=lambda data: hasher.update(data),
614 ) 614 )
615 return True 615 return True
616 616
617 def _fileSendingRequestTrigger( 617 def _file_sending_request_trigger(
618 self, client, session, content_data, content_name, file_data, file_elt 618 self, client, session, content_data, content_name, file_data, file_elt
619 ): 619 ):
620 if not client.is_component: 620 if not client.is_component:
621 return True, None 621 return True, None
622 else: 622 else:
623 return ( 623 return (
624 False, 624 False,
625 defer.ensureDeferred(self._retrieveFiles( 625 defer.ensureDeferred(self._retrieve_files(
626 client, session, content_data, content_name, file_data, file_elt 626 client, session, content_data, content_name, file_data, file_elt
627 )), 627 )),
628 ) 628 )
629 629
630 ## HTTP Upload ## 630 ## HTTP Upload ##
640 assert '/' not in request.filename 640 assert '/' not in request.filename
641 # client._file_sharing_allowed_hosts is set in plugin XEP-0329 641 # client._file_sharing_allowed_hosts is set in plugin XEP-0329
642 if request.from_.host not in client._file_sharing_allowed_hosts: 642 if request.from_.host not in client._file_sharing_allowed_hosts:
643 raise error.StanzaError("forbidden") 643 raise error.StanzaError("forbidden")
644 644
645 quota = self.getQuota(client, request.from_) 645 quota = self.get_quota(client, request.from_)
646 if quota is not None: 646 if quota is not None:
647 used_space = await self.host.memory.fileGetUsedSpace(client, request.from_) 647 used_space = await self.host.memory.file_get_used_space(client, request.from_)
648 648
649 if (used_space + request.size) > quota: 649 if (used_space + request.size) > quota:
650 raise error.StanzaError( 650 raise error.StanzaError(
651 "not-acceptable", 651 "not-acceptable",
652 text=OVER_QUOTA_TXT.format( 652 text=OVER_QUOTA_TXT.format(
653 quota=utils.getHumanSize(quota), 653 quota=utils.get_human_size(quota),
654 used_space=utils.getHumanSize(used_space), 654 used_space=utils.get_human_size(used_space),
655 file_size=utils.getHumanSize(request.size) 655 file_size=utils.get_human_size(request.size)
656 ), 656 ),
657 appCondition = self._hu.getFileTooLargeElt(max(quota - used_space, 0)) 657 appCondition = self._hu.get_file_too_large_elt(max(quota - used_space, 0))
658 ) 658 )
659 659
660 upload_id = shortuuid.ShortUUID().random(length=30) 660 upload_id = shortuuid.ShortUUID().random(length=30)
661 assert '/' not in upload_id 661 assert '/' not in upload_id
662 timer = reactor.callLater(30, self._purge_slot, upload_id) 662 timer = reactor.callLater(30, self._purge_slot, upload_id)
669 ) 669 )
670 return slot 670 return slot
671 671
672 ## metadata triggers ## 672 ## metadata triggers ##
673 673
674 def _addFileMetadataElts(self, client, file_elt, extra_args): 674 def _add_file_metadata_elts(self, client, file_elt, extra_args):
675 # affiliation 675 # affiliation
676 affiliation = extra_args.get('affiliation') 676 affiliation = extra_args.get('affiliation')
677 if affiliation is not None: 677 if affiliation is not None:
678 file_elt.addElement((NS_FS_AFFILIATION, "affiliation"), content=affiliation) 678 file_elt.addElement((NS_FS_AFFILIATION, "affiliation"), content=affiliation)
679 679
691 count = 0 691 count = 0
692 692
693 comment_elt["count"] = str(count) 693 comment_elt["count"] = str(count)
694 return True 694 return True
695 695
696 def _getFileMetadataElts(self, client, file_elt, file_data): 696 def _get_file_metadata_elts(self, client, file_elt, file_data):
697 # affiliation 697 # affiliation
698 try: 698 try:
699 affiliation_elt = next(file_elt.elements(NS_FS_AFFILIATION, "affiliation")) 699 affiliation_elt = next(file_elt.elements(NS_FS_AFFILIATION, "affiliation"))
700 except StopIteration: 700 except StopIteration:
701 pass 701 pass
710 else: 710 else:
711 file_data["comments_url"] = str(comments_elt) 711 file_data["comments_url"] = str(comments_elt)
712 file_data["comments_count"] = comments_elt["count"] 712 file_data["comments_count"] = comments_elt["count"]
713 return True 713 return True
714 714
715 def _addFileMetadata( 715 def _add_file_metadata(
716 self, client, iq_elt, iq_result_elt, owner, node_path, files_data): 716 self, client, iq_elt, iq_result_elt, owner, node_path, files_data):
717 for file_data in files_data: 717 for file_data in files_data:
718 file_data["comments_url"] = uri.buildXMPPUri( 718 file_data["comments_url"] = uri.build_xmpp_uri(
719 "pubsub", 719 "pubsub",
720 path=client.jid.full(), 720 path=client.jid.full(),
721 node=COMMENT_NODE_PREFIX + file_data["id"], 721 node=COMMENT_NODE_PREFIX + file_data["id"],
722 ) 722 )
723 return True 723 return True
724 724
725 def _addDirectoryMetadataElts( 725 def _add_directory_metadata_elts(
726 self, client, file_data, directory_elt, owner, node_path): 726 self, client, file_data, directory_elt, owner, node_path):
727 affiliation = file_data.get('affiliation') 727 affiliation = file_data.get('affiliation')
728 if affiliation is not None: 728 if affiliation is not None:
729 directory_elt.addElement( 729 directory_elt.addElement(
730 (NS_FS_AFFILIATION, "affiliation"), 730 (NS_FS_AFFILIATION, "affiliation"),
731 content=affiliation 731 content=affiliation
732 ) 732 )
733 733
734 def _getDirectoryMetadataElts( 734 def _get_directory_metadata_elts(
735 self, client, elt, file_data): 735 self, client, elt, file_data):
736 try: 736 try:
737 affiliation_elt = next(elt.elements(NS_FS_AFFILIATION, "affiliation")) 737 affiliation_elt = next(elt.elements(NS_FS_AFFILIATION, "affiliation"))
738 except StopIteration: 738 except StopIteration:
739 pass 739 pass
752 "category": "pubsub", 752 "category": "pubsub",
753 "type": "virtual", # FIXME: non standard, here to avoid this service being considered as main pubsub one 753 "type": "virtual", # FIXME: non standard, here to avoid this service being considered as main pubsub one
754 "name": "files commenting service", 754 "name": "files commenting service",
755 } 755 }
756 756
757 def _getFileId(self, nodeIdentifier): 757 def _get_file_id(self, nodeIdentifier):
758 if not nodeIdentifier.startswith(COMMENT_NODE_PREFIX): 758 if not nodeIdentifier.startswith(COMMENT_NODE_PREFIX):
759 raise error.StanzaError("item-not-found") 759 raise error.StanzaError("item-not-found")
760 file_id = nodeIdentifier[len(COMMENT_NODE_PREFIX) :] 760 file_id = nodeIdentifier[len(COMMENT_NODE_PREFIX) :]
761 if not file_id: 761 if not file_id:
762 raise error.StanzaError("item-not-found") 762 raise error.StanzaError("item-not-found")
763 return file_id 763 return file_id
764 764
765 async def getFileData(self, requestor, nodeIdentifier): 765 async def get_file_data(self, requestor, nodeIdentifier):
766 file_id = self._getFileId(nodeIdentifier) 766 file_id = self._get_file_id(nodeIdentifier)
767 try: 767 try:
768 files = await self.host.memory.getFiles(self.parent, requestor, file_id) 768 files = await self.host.memory.get_files(self.parent, requestor, file_id)
769 except (exceptions.NotFound, exceptions.PermissionError): 769 except (exceptions.NotFound, exceptions.PermissionError):
770 # we don't differentiate between NotFound and PermissionError 770 # we don't differentiate between NotFound and PermissionError
771 # to avoid leaking information on existing files 771 # to avoid leaking information on existing files
772 raise error.StanzaError("item-not-found") 772 raise error.StanzaError("item-not-found")
773 if not files: 773 if not files:
774 raise error.StanzaError("item-not-found") 774 raise error.StanzaError("item-not-found")
775 if len(files) > 1: 775 if len(files) > 1:
776 raise error.InternalError("there should be only one file") 776 raise error.InternalError("there should be only one file")
777 return files[0] 777 return files[0]
778 778
779 def commentsUpdate(self, extra, new_comments, peer_jid): 779 def comments_update(self, extra, new_comments, peer_jid):
780 """update comments (replace or insert new_comments) 780 """update comments (replace or insert new_comments)
781 781
782 @param extra(dict): extra data to update 782 @param extra(dict): extra data to update
783 @param new_comments(list[tuple(unicode, unicode, unicode)]): comments to update or insert 783 @param new_comments(list[tuple(unicode, unicode, unicode)]): comments to update or insert
784 @param peer_jid(unicode, None): bare jid of the requestor, or None if request is done by owner 784 @param peer_jid(unicode, None): bare jid of the requestor, or None if request is done by owner
805 for comment in updated: 805 for comment in updated:
806 new_comments.remove(comment) 806 new_comments.remove(comment)
807 807
808 current_comments.extend(new_comments) 808 current_comments.extend(new_comments)
809 809
810 def commentsDelete(self, extra, comments): 810 def comments_delete(self, extra, comments):
811 try: 811 try:
812 comments_dict = extra["comments"] 812 comments_dict = extra["comments"]
813 except KeyError: 813 except KeyError:
814 return 814 return
815 for comment in comments: 815 for comment in comments:
816 try: 816 try:
817 comments_dict.remove(comment) 817 comments_dict.remove(comment)
818 except ValueError: 818 except ValueError:
819 continue 819 continue
820 820
821 def _getFrom(self, item_elt): 821 def _get_from(self, item_elt):
822 """retrieve publisher of an item 822 """retrieve publisher of an item
823 823
824 @param item_elt(domish.element): <item> element 824 @param item_elt(domish.element): <item> element
825 @return (unicode): full jid as string 825 @return (unicode): full jid as string
826 """ 826 """
830 return iq_elt["from"] 830 return iq_elt["from"]
831 831
832 @ensure_deferred 832 @ensure_deferred
833 async def publish(self, requestor, service, nodeIdentifier, items): 833 async def publish(self, requestor, service, nodeIdentifier, items):
834 #  we retrieve the file a first time to check authorisations 834 #  we retrieve the file a first time to check authorisations
835 file_data = await self.getFileData(requestor, nodeIdentifier) 835 file_data = await self.get_file_data(requestor, nodeIdentifier)
836 file_id = file_data["id"] 836 file_id = file_data["id"]
837 comments = [(item["id"], self._getFrom(item), item.toXml()) for item in items] 837 comments = [(item["id"], self._get_from(item), item.toXml()) for item in items]
838 if requestor.userhostJID() == file_data["owner"]: 838 if requestor.userhostJID() == file_data["owner"]:
839 peer_jid = None 839 peer_jid = None
840 else: 840 else:
841 peer_jid = requestor.userhost() 841 peer_jid = requestor.userhost()
842 update_cb = partial(self.commentsUpdate, new_comments=comments, peer_jid=peer_jid) 842 update_cb = partial(self.comments_update, new_comments=comments, peer_jid=peer_jid)
843 try: 843 try:
844 await self.host.memory.fileUpdate(file_id, "extra", update_cb) 844 await self.host.memory.file_update(file_id, "extra", update_cb)
845 except exceptions.PermissionError: 845 except exceptions.PermissionError:
846 raise error.StanzaError("not-authorized") 846 raise error.StanzaError("not-authorized")
847 847
848 @ensure_deferred 848 @ensure_deferred
849 async def items(self, requestor, service, nodeIdentifier, maxItems, itemIdentifiers): 849 async def items(self, requestor, service, nodeIdentifier, maxItems, itemIdentifiers):
850 file_data = await self.getFileData(requestor, nodeIdentifier) 850 file_data = await self.get_file_data(requestor, nodeIdentifier)
851 comments = file_data["extra"].get("comments", []) 851 comments = file_data["extra"].get("comments", [])
852 if itemIdentifiers: 852 if itemIdentifiers:
853 return [generic.parseXml(c[2]) for c in comments if c[0] in itemIdentifiers] 853 return [generic.parseXml(c[2]) for c in comments if c[0] in itemIdentifiers]
854 else: 854 else:
855 return [generic.parseXml(c[2]) for c in comments] 855 return [generic.parseXml(c[2]) for c in comments]
856 856
857 @ensure_deferred 857 @ensure_deferred
858 async def retract(self, requestor, service, nodeIdentifier, itemIdentifiers): 858 async def retract(self, requestor, service, nodeIdentifier, itemIdentifiers):
859 file_data = await self.getFileData(requestor, nodeIdentifier) 859 file_data = await self.get_file_data(requestor, nodeIdentifier)
860 file_id = file_data["id"] 860 file_id = file_data["id"]
861 try: 861 try:
862 comments = file_data["extra"]["comments"] 862 comments = file_data["extra"]["comments"]
863 except KeyError: 863 except KeyError:
864 raise error.StanzaError("item-not-found") 864 raise error.StanzaError("item-not-found")
878 878
879 if requestor.userhostJID() != file_data["owner"]: 879 if requestor.userhostJID() != file_data["owner"]:
880 if not all([c[1] == requestor.userhost() for c in to_remove]): 880 if not all([c[1] == requestor.userhost() for c in to_remove]):
881 raise error.StanzaError("not-authorized") 881 raise error.StanzaError("not-authorized")
882 882
883 remove_cb = partial(self.commentsDelete, comments=to_remove) 883 remove_cb = partial(self.comments_delete, comments=to_remove)
884 await self.host.memory.fileUpdate(file_id, "extra", remove_cb) 884 await self.host.memory.file_update(file_id, "extra", remove_cb)