Mercurial > libervia-backend
diff sat/memory/disco.py @ 4037:524856bd7b19
massive refactoring to switch from camelCase to snake_case:
historically, Libervia (formerly SàT) used camelCase, as allowed by PEP8 for projects
extending pre-PEP8 code, in order to match the coding style used in Twisted.
However, snake_case is more readable, and it is better to follow PEP8 best practices, so it
has been decided to move to full snake_case. Because Libervia has a huge codebase, the
partial transition had ended up with an ugly mix of camelCase and snake_case.
To fix that, this patch does a big refactoring, renaming every function and method
(including the bridge API) that does not come from Twisted or Wokkel to use snake_case
throughout. This is a massive change and may result in some bugs.
author | Goffi <goffi@goffi.org> |
---|---|
date | Sat, 08 Apr 2023 13:54:42 +0200 |
parents | 073f386a191a |
children |
line wrap: on
line diff
--- a/sat/memory/disco.py Fri Apr 07 15:18:39 2023 +0200 +++ b/sat/memory/disco.py Sat Apr 08 13:54:42 2023 +0200 @@ -87,7 +87,7 @@ return self.hashes.__contains__(hash_) def load(self): - def fillHashes(hashes): + def fill_hashes(hashes): for hash_, xml in hashes.items(): element = xml_tools.ElementParser()(xml) disco_info = disco.DiscoInfo.fromElement(element) @@ -106,7 +106,7 @@ log.info("Disco hashes loaded") d = self.persistent.load() - d.addCallback(fillHashes) + d.addCallback(fill_hashes) return d @@ -131,11 +131,11 @@ @param node(unicode): optional node to use for disco request @return: a Deferred which fire a boolean (True if feature is available) """ - disco_infos = yield self.getInfos(client, jid_, node) + disco_infos = yield self.get_infos(client, jid_, node) defer.returnValue(feature in disco_infos.features) @defer.inlineCallbacks - def checkFeature(self, client, feature, jid_=None, node=""): + def check_feature(self, client, feature, jid_=None, node=""): """Like hasFeature, but raise an exception is feature is not Found @param feature: feature namespace @@ -144,13 +144,13 @@ @raise: exceptions.FeatureNotFound """ - disco_infos = yield self.getInfos(client, jid_, node) + disco_infos = yield self.get_infos(client, jid_, node) if not feature in disco_infos.features: raise failure.Failure(exceptions.FeatureNotFound()) @defer.inlineCallbacks - def checkFeatures(self, client, features, jid_=None, identity=None, node=""): - """Like checkFeature, but check several features at once, and check also identity + def check_features(self, client, features, jid_=None, identity=None, node=""): + """Like check_feature, but check several features at once, and check also identity @param features(iterable[unicode]): features to check @param jid_(jid.JID): jid of the target, or None for profile's server @@ -159,14 +159,14 @@ @raise: exceptions.FeatureNotFound """ - disco_infos = yield self.getInfos(client, jid_, node) + disco_infos = yield self.get_infos(client, jid_, 
node) if not set(features).issubset(disco_infos.features): raise failure.Failure(exceptions.FeatureNotFound()) if identity is not None and identity not in disco_infos.identities: raise failure.Failure(exceptions.FeatureNotFound()) - async def hasIdentity( + async def has_identity( self, client: SatXMPPEntity, category: str, @@ -182,10 +182,10 @@ @param node(unicode): optional node to use for disco request @return: True if the entity has the given identity """ - disco_infos = await self.getInfos(client, jid_, node) + disco_infos = await self.get_infos(client, jid_, node) return (category, type_) in disco_infos.identities - def getInfos(self, client, jid_=None, node="", use_cache=True): + def get_infos(self, client, jid_=None, node="", use_cache=True): """get disco infos from jid_, filling capability hash if needed @param jid_: jid of the target, or None for profile's server @@ -199,13 +199,13 @@ if not use_cache: # we ignore cache, so we pretend we haven't found it raise KeyError - cap_hash = self.host.memory.getEntityData( + cap_hash = self.host.memory.entity_data_get( client, jid_, [C.ENTITY_CAP_HASH] )[C.ENTITY_CAP_HASH] except (KeyError, exceptions.UnknownEntityError): # capability hash is not available, we'll compute one - def infosCb(disco_infos): - cap_hash = self.generateHash(disco_infos) + def infos_cb(disco_infos): + cap_hash = self.generate_hash(disco_infos) for ext_form in disco_infos.extensions.values(): # wokkel doesn't call typeCheck on reception, so we do it here # to avoid ending up with incorrect types. 
We have to do it after @@ -213,12 +213,12 @@ # hash) ext_form.typeCheck() self.hashes[cap_hash] = disco_infos - self.host.memory.updateEntityData( + self.host.memory.update_entity_data( client, jid_, C.ENTITY_CAP_HASH, cap_hash ) return disco_infos - def infosEb(fail): + def infos_eb(fail): if fail.check(defer.CancelledError): reason = "request time-out" fail = failure.Failure(exceptions.TimeOutError(str(fail.value))) @@ -236,21 +236,21 @@ # XXX we set empty disco in cache, to avoid getting an error or waiting # for a timeout again the next time - self.host.memory.updateEntityData( + self.host.memory.update_entity_data( client, jid_, C.ENTITY_CAP_HASH, CAP_HASH_ERROR ) raise fail d = client.disco.requestInfo(jid_, nodeIdentifier=node) - d.addCallback(infosCb) - d.addErrback(infosEb) + d.addCallback(infos_cb) + d.addErrback(infos_eb) return d else: disco_infos = self.hashes[cap_hash] return defer.succeed(disco_infos) @defer.inlineCallbacks - def getItems(self, client, jid_=None, node="", use_cache=True): + def get_items(self, client, jid_=None, node="", use_cache=True): """get disco items from jid_, cache them for our own server @param jid_(jid.JID): jid of the target, or None for profile's server @@ -264,7 +264,7 @@ if jid_ == client.server_jid and not node: # we cache items only for our own server and if node is not set try: - items = self.host.memory.getEntityData( + items = self.host.memory.entity_data_get( client, jid_, ["DISCO_ITEMS"] )["DISCO_ITEMS"] log.debug("[%s] disco items are in cache" % jid_.full()) @@ -274,7 +274,7 @@ except (KeyError, exceptions.UnknownEntityError): log.debug("Caching [%s] disco items" % jid_.full()) items = yield client.disco.requestItems(jid_, nodeIdentifier=node) - self.host.memory.updateEntityData( + self.host.memory.update_entity_data( client, jid_, "DISCO_ITEMS", items ) else: @@ -290,24 +290,24 @@ defer.returnValue(items) - def _infosEb(self, failure_, entity_jid): + def _infos_eb(self, failure_, entity_jid): 
failure_.trap(StanzaError) log.warning( _("Error while requesting [%(jid)s]: %(error)s") % {"jid": entity_jid.full(), "error": failure_.getErrorMessage()} ) - def findServiceEntity(self, client, category, type_, jid_=None): - """Helper method to find first available entity from findServiceEntities + def find_service_entity(self, client, category, type_, jid_=None): + """Helper method to find first available entity from find_service_entities - args are the same as for [findServiceEntities] + args are the same as for [find_service_entities] @return (jid.JID, None): found entity """ - d = self.host.findServiceEntities(client, category, type_) + d = self.host.find_service_entities(client, category, type_) d.addCallback(lambda entities: entities.pop() if entities else None) return d - def findServiceEntities(self, client, category, type_, jid_=None): + def find_service_entities(self, client, category, type_, jid_=None): """Return all available items of an entity which correspond to (category, type_) @param category: identity's category @@ -318,29 +318,29 @@ """ found_entities = set() - def infosCb(infos, entity_jid): + def infos_cb(infos, entity_jid): if (category, type_) in infos.identities: found_entities.add(entity_jid) - def gotItems(items): + def got_items(items): defers_list = [] for item in items: - info_d = self.getInfos(client, item.entity) + info_d = self.get_infos(client, item.entity) info_d.addCallbacks( - infosCb, self._infosEb, [item.entity], None, [item.entity] + infos_cb, self._infos_eb, [item.entity], None, [item.entity] ) defers_list.append(info_d) return defer.DeferredList(defers_list) - d = self.getItems(client, jid_) - d.addCallback(gotItems) + d = self.get_items(client, jid_) + d.addCallback(got_items) d.addCallback(lambda __: found_entities) reactor.callLater( TIMEOUT, d.cancel ) # FIXME: one bad service make a general timeout return d - def findFeaturesSet(self, client, features, identity=None, jid_=None): + def find_features_set(self, client, 
features, identity=None, jid_=None): """Return entities (including jid_ and its items) offering features @param features: iterable of features which must be present @@ -355,7 +355,7 @@ features = set(features) found_entities = set() - def infosCb(infos, entity): + def infos_cb(infos, entity): if entity is None: log.warning(_("received an item without jid")) return @@ -364,23 +364,23 @@ if features.issubset(infos.features): found_entities.add(entity) - def gotItems(items): + def got_items(items): defer_list = [] for entity in [jid_] + [item.entity for item in items]: - infos_d = self.getInfos(client, entity) - infos_d.addCallbacks(infosCb, self._infosEb, [entity], None, [entity]) + infos_d = self.get_infos(client, entity) + infos_d.addCallbacks(infos_cb, self._infos_eb, [entity], None, [entity]) defer_list.append(infos_d) return defer.DeferredList(defer_list) - d = self.getItems(client, jid_) - d.addCallback(gotItems) + d = self.get_items(client, jid_) + d.addCallback(got_items) d.addCallback(lambda __: found_entities) reactor.callLater( TIMEOUT, d.cancel ) # FIXME: one bad service make a general timeout return d - def generateHash(self, services): + def generate_hash(self, services): """ Generate a unique hash for given service hash algorithm is the one described in XEP-0115 @@ -433,7 +433,7 @@ return cap_hash @defer.inlineCallbacks - def _discoInfos( + def _disco_infos( self, entity_jid_s, node="", use_cache=True, profile_key=C.PROF_KEY_NONE ): """Discovery method for the bridge @@ -443,9 +443,9 @@ @return: list of tuples """ - client = self.host.getClient(profile_key) + client = self.host.get_client(profile_key) entity = jid.JID(entity_jid_s) - disco_infos = yield self.getInfos(client, entity, node, use_cache) + disco_infos = yield self.get_infos(client, entity, node, use_cache) extensions = {} # FIXME: should extensions be serialised using tools.common.data_format? 
for form_type, form in list(disco_infos.extensions.items()): @@ -459,7 +459,7 @@ values = [field.value] if field.value is not None else field.values if field.fieldType == "boolean": - values = [C.boolConst(v) for v in values] + values = [C.bool_const(v) for v in values] fields.append((data, values)) extensions[form_type or ""] = fields @@ -483,7 +483,7 @@ yield (item.entity.full(), item.nodeIdentifier or "", item.name or "") @defer.inlineCallbacks - def _discoItems( + def _disco_items( self, entity_jid_s, node="", use_cache=True, profile_key=C.PROF_KEY_NONE ): """ Discovery method for the bridge @@ -492,8 +492,8 @@ @param node(unicode): optional node to use @param use_cache(bool): if True, use cached data if available @return: list of tuples""" - client = self.host.getClient(profile_key) + client = self.host.get_client(profile_key) entity = jid.JID(entity_jid_s) - disco_items = yield self.getItems(client, entity, node, use_cache) + disco_items = yield self.get_items(client, entity, node, use_cache) ret = list(self.items2tuples(disco_items)) defer.returnValue(ret)