Mercurial > libervia-backend
diff sat/plugins/plugin_comp_file_sharing.py @ 4037:524856bd7b19
massive refactoring to switch from camelCase to snake_case:
historically, Libervia (SàT before) was using camelCase as allowed by PEP8 when using a
pre-PEP8 codebase, to use the same coding style as in Twisted.
However, snake_case is more readable and it's better to follow PEP8 best practices, so it
has been decided to move to full snake_case. Because Libervia has a huge codebase, this
ended with an ugly mix of camelCase and snake_case.
To fix that, this patch does a big refactoring by renaming every function and method
(including bridge) that are not coming from Twisted or Wokkel, to use fully snake_case.
This is a massive change, and may result in some bugs.
author | Goffi <goffi@goffi.org> |
---|---|
date | Sat, 08 Apr 2023 13:54:42 +0200 |
parents | 412b99c29d83 |
children |
line wrap: on
line diff
--- a/sat/plugins/plugin_comp_file_sharing.py Fri Apr 07 15:18:39 2023 +0200 +++ b/sat/plugins/plugin_comp_file_sharing.py Sat Apr 08 13:54:42 2023 +0200 @@ -113,7 +113,7 @@ return resource.ErrorPage(code, brief, details).render(request) - def getDispositionType(self, media_type, media_subtype): + def get_disposition_type(self, media_type, media_subtype): if media_type in ('image', 'video'): return 'inline' elif media_type == 'application' and media_subtype == 'pdf': @@ -136,7 +136,7 @@ "Date, Content-Length, Content-Range") return super().render(request) - def render_OPTIONS(self, request): + def render_options(self, request): request.setResponseCode(http.OK) return b"" @@ -146,17 +146,17 @@ except exceptions.DataError: return self.errorPage(request, http.NOT_FOUND) - defer.ensureDeferred(self.renderGet(request)) + defer.ensureDeferred(self.render_get(request)) return server.NOT_DONE_YET - async def renderGet(self, request): + async def render_get(self, request): try: upload_id, filename = request.upload_data except exceptions.DataError: request.write(self.errorPage(request, http.FORBIDDEN)) request.finish() return - found_files = await request.file_sharing.host.memory.getFiles( + found_files = await request.file_sharing.host.memory.get_files( client=None, peer_jid=None, perms_to_check=None, public_id=upload_id) if not found_files: request.write(self.errorPage(request, http.NOT_FOUND)) @@ -170,7 +170,7 @@ file_res = static.File(file_path) file_res.type = f'{found_file["media_type"]}/{found_file["media_subtype"]}' file_res.encoding = file_res.contentEncodings.get(Path(found_file['name']).suffix) - disp_type = self.getDispositionType( + disp_type = self.get_disposition_type( found_file['media_type'], found_file['media_subtype']) # the URL is percent encoded, and not all browsers/tools unquote the file name, # thus we add a content disposition header @@ -190,10 +190,10 @@ request.finish() def render_PUT(self, request): - defer.ensureDeferred(self.renderPut(request)) 
+ defer.ensureDeferred(self.render_put(request)) return server.NOT_DONE_YET - async def renderPut(self, request): + async def render_put(self, request): try: client, upload_request = request.upload_request_data upload_id, filename = request.upload_data @@ -228,7 +228,7 @@ "path": path } - await request.file_sharing.registerReceivedFile( + await request.file_sharing.register_received_file( client, upload_request.from_, file_data, tmp_file_path, public_id=public_id, ) @@ -273,7 +273,7 @@ def file_tmp_dir(self): return self.channel.site.file_tmp_dir - def refuseRequest(self): + def refuse_request(self): if self.content is not None: self.content.close() self.content = open(os.devnull, 'w+b') @@ -287,16 +287,16 @@ upload_id, filename = self.upload_data except exceptions.DataError as e: log.warning(f"Invalid PUT request, we stop here: {e}") - return self.refuseRequest() + return self.refuse_request() try: client, upload_request, timer = self.file_sharing.expected_uploads.pop(upload_id) except KeyError: log.warning(f"unknown (expired?) upload ID received for a PUT: {upload_id!r}") - return self.refuseRequest() + return self.refuse_request() if not timer.active: log.warning(f"upload id {upload_id!r} used for a PUT, but it is expired") - return self.refuseRequest() + return self.refuse_request() timer.cancel() @@ -305,7 +305,7 @@ f"invalid filename for PUT (upload id: {upload_id!r}, URL: {self.channel._path.decode()}). 
Original " f"{upload_request.filename!r} doesn't match {filename!r}" ) - return self.refuseRequest() + return self.refuse_request() self.upload_request_data = (client, upload_request) @@ -355,24 +355,24 @@ self._h = self.host.plugins["XEP-0300"] self._t = self.host.plugins["XEP-0264"] self._hu = self.host.plugins["XEP-0363"] - self._hu.registerHandler(self._on_http_upload) - self.host.trigger.add("FILE_getDestDir", self._getDestDirTrigger) + self._hu.register_handler(self._on_http_upload) + self.host.trigger.add("FILE_getDestDir", self._get_dest_dir_trigger) self.host.trigger.add( - "XEP-0234_fileSendingRequest", self._fileSendingRequestTrigger, priority=1000 + "XEP-0234_fileSendingRequest", self._file_sending_request_trigger, priority=1000 ) - self.host.trigger.add("XEP-0234_buildFileElement", self._addFileMetadataElts) - self.host.trigger.add("XEP-0234_parseFileElement", self._getFileMetadataElts) - self.host.trigger.add("XEP-0329_compGetFilesFromNode", self._addFileMetadata) + self.host.trigger.add("XEP-0234_buildFileElement", self._add_file_metadata_elts) + self.host.trigger.add("XEP-0234_parseFileElement", self._get_file_metadata_elts) + self.host.trigger.add("XEP-0329_compGetFilesFromNode", self._add_file_metadata) self.host.trigger.add( "XEP-0329_compGetFilesFromNode_build_directory", - self._addDirectoryMetadataElts) + self._add_directory_metadata_elts) self.host.trigger.add( "XEP-0329_parseResult_directory", - self._getDirectoryMetadataElts) + self._get_directory_metadata_elts) self.files_path = self.host.get_local_path(None, C.FILES_DIR) - self.http_port = int(self.host.memory.getConfig( + self.http_port = int(self.host.memory.config_get( 'component file-sharing', 'http_upload_port', 8888)) - connection_type = self.host.memory.getConfig( + connection_type = self.host.memory.config_get( 'component file-sharing', 'http_upload_connection_type', 'https') if connection_type not in ('http', 'https'): raise exceptions.ConfigError( @@ -383,51 +383,51 @@ if 
connection_type == 'http': reactor.listenTCP(self.http_port, self.server) else: - options = tls.getOptionsFromConfig( + options = tls.get_options_from_config( self.host.memory.config, "component file-sharing") - tls.TLSOptionsCheck(options) - context_factory = tls.getTLSContextFactory(options) + tls.tls_options_check(options) + context_factory = tls.get_tls_context_factory(options) reactor.listenSSL(self.http_port, self.server, context_factory) - def getHandler(self, client): + def get_handler(self, client): return Comments_handler(self) - def profileConnecting(self, client): + def profile_connecting(self, client): # we activate HTTP upload client.enabled_features.add("XEP-0363") self.init() - public_base_url = self.host.memory.getConfig( + public_base_url = self.host.memory.config_get( 'component file-sharing', 'http_upload_public_facing_url') if public_base_url is None: client._file_sharing_base_url = f"https://{client.host}:{self.http_port}" else: client._file_sharing_base_url = public_base_url path = client.file_tmp_dir = os.path.join( - self.host.memory.getConfig("", "local_dir"), + self.host.memory.config_get("", "local_dir"), C.FILES_TMP_DIR, - regex.pathEscape(client.profile), + regex.path_escape(client.profile), ) if not os.path.exists(path): os.makedirs(path) - def getQuota(self, client, entity): + def get_quota(self, client, entity): """Return maximum size allowed for all files for entity""" - quotas = self.host.memory.getConfig("component file-sharing", "quotas_json", {}) - if self.host.memory.isAdminJID(entity): + quotas = self.host.memory.config_get("component file-sharing", "quotas_json", {}) + if self.host.memory.is_admin_jid(entity): quota = quotas.get("admins") else: try: quota = quotas["jids"][entity.userhost()] except KeyError: quota = quotas.get("users") - return None if quota is None else utils.parseSize(quota) + return None if quota is None else utils.parse_size(quota) async def generate_thumbnails(self, extra: dict, image_path: Path): 
thumbnails = extra.setdefault(C.KEY_THUMBNAILS, []) for max_thumb_size in self._t.SIZES: try: - thumb_size, thumb_id = await self._t.generateThumbnail( + thumb_size, thumb_id = await self._t.generate_thumbnail( image_path, max_thumb_size, # we keep thumbnails for 6 months @@ -438,7 +438,7 @@ break thumbnails.append({"id": thumb_id, "size": thumb_size}) - async def registerReceivedFile( + async def register_received_file( self, client, peer_jid, file_data, file_path, public_id=None, extra=None): """Post file reception tasks @@ -460,9 +460,9 @@ log.debug(_("Reusing already generated hash")) file_hash = file_data["hash_hasher"].hexdigest() else: - hasher = self._h.getHasher(HASH_ALGO) + hasher = self._h.get_hasher(HASH_ALGO) with file_path.open('rb') as f: - file_hash = await self._h.calculateHash(f, hasher) + file_hash = await self._h.calculate_hash(f, hasher) final_path = self.files_path/file_hash if final_path.is_file(): @@ -493,7 +493,7 @@ else: await self.generate_thumbnails(extra, thumb_path) - await self.host.memory.setFile( + await self.host.memory.set_file( client, name=name, version="", @@ -508,7 +508,7 @@ extra=extra, ) - async def _getDestDirTrigger( + async def _get_dest_dir_trigger( self, client, peer_jid, transfer_data, file_data, stream_object ): """This trigger accept file sending request, and store file locally""" @@ -522,17 +522,17 @@ assert C.KEY_PROGRESS_ID in file_data filename = file_data["name"] assert filename and not "/" in filename - quota = self.getQuota(client, peer_jid) + quota = self.get_quota(client, peer_jid) if quota is not None: - used_space = await self.host.memory.fileGetUsedSpace(client, peer_jid) + used_space = await self.host.memory.file_get_used_space(client, peer_jid) if (used_space + file_data["size"]) > quota: raise error.StanzaError( "not-acceptable", text=OVER_QUOTA_TXT.format( - quota=utils.getHumanSize(quota), - used_space=utils.getHumanSize(used_space), - file_size=utils.getHumanSize(file_data['size']) + 
quota=utils.get_human_size(quota), + used_space=utils.get_human_size(used_space), + file_size=utils.get_human_size(file_data['size']) ) ) file_tmp_dir = self.host.get_local_path( @@ -543,26 +543,26 @@ transfer_data["finished_d"].addCallback( lambda __: defer.ensureDeferred( - self.registerReceivedFile(client, peer_jid, file_data, file_tmp_path) + self.register_received_file(client, peer_jid, file_data, file_tmp_path) ) ) - self._f.openFileWrite( + self._f.open_file_write( client, file_tmp_path, transfer_data, file_data, stream_object ) return False, True - async def _retrieveFiles( + async def _retrieve_files( self, client, session, content_data, content_name, file_data, file_elt ): """This method retrieve a file on request, and send if after checking permissions""" peer_jid = session["peer_jid"] if session['local_jid'].user: - owner = client.getOwnerFromJid(session['local_jid']) + owner = client.get_owner_from_jid(session['local_jid']) else: owner = peer_jid try: - found_files = await self.host.memory.getFiles( + found_files = await self.host.memory.get_files( client, peer_jid=peer_jid, name=file_data.get("name"), @@ -595,7 +595,7 @@ type_=found_file['type'])) file_hash = found_file["file_hash"] file_path = self.files_path / file_hash - file_data["hash_hasher"] = hasher = self._h.getHasher(found_file["hash_algo"]) + file_data["hash_hasher"] = hasher = self._h.get_hasher(found_file["hash_algo"]) size = file_data["size"] = found_file["size"] file_data["file_hash"] = file_hash file_data["hash_algo"] = found_file["hash_algo"] @@ -608,13 +608,13 @@ self.host, client, file_path, - uid=self._jf.getProgressId(session, content_name), + uid=self._jf.get_progress_id(session, content_name), size=size, data_cb=lambda data: hasher.update(data), ) return True - def _fileSendingRequestTrigger( + def _file_sending_request_trigger( self, client, session, content_data, content_name, file_data, file_elt ): if not client.is_component: @@ -622,7 +622,7 @@ else: return ( False, - 
defer.ensureDeferred(self._retrieveFiles( + defer.ensureDeferred(self._retrieve_files( client, session, content_data, content_name, file_data, file_elt )), ) @@ -642,19 +642,19 @@ if request.from_.host not in client._file_sharing_allowed_hosts: raise error.StanzaError("forbidden") - quota = self.getQuota(client, request.from_) + quota = self.get_quota(client, request.from_) if quota is not None: - used_space = await self.host.memory.fileGetUsedSpace(client, request.from_) + used_space = await self.host.memory.file_get_used_space(client, request.from_) if (used_space + request.size) > quota: raise error.StanzaError( "not-acceptable", text=OVER_QUOTA_TXT.format( - quota=utils.getHumanSize(quota), - used_space=utils.getHumanSize(used_space), - file_size=utils.getHumanSize(request.size) + quota=utils.get_human_size(quota), + used_space=utils.get_human_size(used_space), + file_size=utils.get_human_size(request.size) ), - appCondition = self._hu.getFileTooLargeElt(max(quota - used_space, 0)) + appCondition = self._hu.get_file_too_large_elt(max(quota - used_space, 0)) ) upload_id = shortuuid.ShortUUID().random(length=30) @@ -671,7 +671,7 @@ ## metadata triggers ## - def _addFileMetadataElts(self, client, file_elt, extra_args): + def _add_file_metadata_elts(self, client, file_elt, extra_args): # affiliation affiliation = extra_args.get('affiliation') if affiliation is not None: @@ -693,7 +693,7 @@ comment_elt["count"] = str(count) return True - def _getFileMetadataElts(self, client, file_elt, file_data): + def _get_file_metadata_elts(self, client, file_elt, file_data): # affiliation try: affiliation_elt = next(file_elt.elements(NS_FS_AFFILIATION, "affiliation")) @@ -712,17 +712,17 @@ file_data["comments_count"] = comments_elt["count"] return True - def _addFileMetadata( + def _add_file_metadata( self, client, iq_elt, iq_result_elt, owner, node_path, files_data): for file_data in files_data: - file_data["comments_url"] = uri.buildXMPPUri( + file_data["comments_url"] = 
uri.build_xmpp_uri( "pubsub", path=client.jid.full(), node=COMMENT_NODE_PREFIX + file_data["id"], ) return True - def _addDirectoryMetadataElts( + def _add_directory_metadata_elts( self, client, file_data, directory_elt, owner, node_path): affiliation = file_data.get('affiliation') if affiliation is not None: @@ -731,7 +731,7 @@ content=affiliation ) - def _getDirectoryMetadataElts( + def _get_directory_metadata_elts( self, client, elt, file_data): try: affiliation_elt = next(elt.elements(NS_FS_AFFILIATION, "affiliation")) @@ -754,7 +754,7 @@ "name": "files commenting service", } - def _getFileId(self, nodeIdentifier): + def _get_file_id(self, nodeIdentifier): if not nodeIdentifier.startswith(COMMENT_NODE_PREFIX): raise error.StanzaError("item-not-found") file_id = nodeIdentifier[len(COMMENT_NODE_PREFIX) :] @@ -762,10 +762,10 @@ raise error.StanzaError("item-not-found") return file_id - async def getFileData(self, requestor, nodeIdentifier): - file_id = self._getFileId(nodeIdentifier) + async def get_file_data(self, requestor, nodeIdentifier): + file_id = self._get_file_id(nodeIdentifier) try: - files = await self.host.memory.getFiles(self.parent, requestor, file_id) + files = await self.host.memory.get_files(self.parent, requestor, file_id) except (exceptions.NotFound, exceptions.PermissionError): # we don't differenciate between NotFound and PermissionError # to avoid leaking information on existing files @@ -776,7 +776,7 @@ raise error.InternalError("there should be only one file") return files[0] - def commentsUpdate(self, extra, new_comments, peer_jid): + def comments_update(self, extra, new_comments, peer_jid): """update comments (replace or insert new_comments) @param extra(dict): extra data to update @@ -807,7 +807,7 @@ current_comments.extend(new_comments) - def commentsDelete(self, extra, comments): + def comments_delete(self, extra, comments): try: comments_dict = extra["comments"] except KeyError: @@ -818,7 +818,7 @@ except ValueError: continue - def 
_getFrom(self, item_elt): + def _get_from(self, item_elt): """retrieve publisher of an item @param item_elt(domish.element): <item> element @@ -832,22 +832,22 @@ @ensure_deferred async def publish(self, requestor, service, nodeIdentifier, items): # we retrieve file a first time to check authorisations - file_data = await self.getFileData(requestor, nodeIdentifier) + file_data = await self.get_file_data(requestor, nodeIdentifier) file_id = file_data["id"] - comments = [(item["id"], self._getFrom(item), item.toXml()) for item in items] + comments = [(item["id"], self._get_from(item), item.toXml()) for item in items] if requestor.userhostJID() == file_data["owner"]: peer_jid = None else: peer_jid = requestor.userhost() - update_cb = partial(self.commentsUpdate, new_comments=comments, peer_jid=peer_jid) + update_cb = partial(self.comments_update, new_comments=comments, peer_jid=peer_jid) try: - await self.host.memory.fileUpdate(file_id, "extra", update_cb) + await self.host.memory.file_update(file_id, "extra", update_cb) except exceptions.PermissionError: raise error.StanzaError("not-authorized") @ensure_deferred async def items(self, requestor, service, nodeIdentifier, maxItems, itemIdentifiers): - file_data = await self.getFileData(requestor, nodeIdentifier) + file_data = await self.get_file_data(requestor, nodeIdentifier) comments = file_data["extra"].get("comments", []) if itemIdentifiers: return [generic.parseXml(c[2]) for c in comments if c[0] in itemIdentifiers] @@ -856,7 +856,7 @@ @ensure_deferred async def retract(self, requestor, service, nodeIdentifier, itemIdentifiers): - file_data = await self.getFileData(requestor, nodeIdentifier) + file_data = await self.get_file_data(requestor, nodeIdentifier) file_id = file_data["id"] try: comments = file_data["extra"]["comments"] @@ -880,5 +880,5 @@ if not all([c[1] == requestor.userhost() for c in to_remove]): raise error.StanzaError("not-authorized") - remove_cb = partial(self.commentsDelete, comments=to_remove) - 
await self.host.memory.fileUpdate(file_id, "extra", remove_cb) + remove_cb = partial(self.comments_delete, comments=to_remove) + await self.host.memory.file_update(file_id, "extra", remove_cb)