libervia-backend: diff sat/memory/memory.py @ 4037:524856bd7b19
massive refactoring to switch from camelCase to snake_case:
Historically, Libervia (formerly SàT) used camelCase, as allowed by PEP8 when working
with pre-PEP8 code, in order to use the same coding style as Twisted.
However, snake_case is more readable and it is better to follow PEP8 best practices, so it
has been decided to move to full snake_case. Because Libervia has a huge codebase, this
ended up with an ugly mix of camelCase and snake_case.
To fix that, this patch does a big refactoring, renaming every function and method
(including bridge methods) that does not come from Twisted or Wokkel to full snake_case.
This is a massive change, and may result in some bugs.
author:   Goffi <goffi@goffi.org>
date:     Sat, 08 Apr 2023 13:54:42 +0200
parents:  7af29260ecb8
children:
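Most of the renames in the diff below are a direct camelCase to snake_case conversion (e.g. newSession becomes new_session), while some methods are additionally reworded to put the verb last (e.g. getConfig becomes config_get, asyncGetParamA becomes param_get_a_async). The following is a minimal sketch of the mechanical part of the conversion, assuming a simple regex-based approach; it is an illustration only, not the tool actually used for this patch:

import re

def camel_to_snake(name: str) -> str:
    """Convert a camelCase identifier to snake_case (e.g. newSession -> new_session)."""
    # insert an underscore before every upper-case letter that follows a
    # lower-case letter or a digit, then lower-case the whole identifier
    return re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", name).lower()

if __name__ == "__main__":
    for old in ("newSession", "_purgeSession", "profileGetUnique", "isSessionStarted"):
        print(f"{old} -> {camel_to_snake(old)}")
    # prints:
    # newSession -> new_session
    # _purgeSession -> _purge_session
    # profileGetUnique -> profile_get_unique
    # isSessionStarted -> is_session_started

Renames that also reorder words (config_get, param_get_a_async, main_resource_get, etc.) clearly follow project-specific naming choices rather than a mechanical rule, as can be seen throughout the diff.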
--- a/sat/memory/memory.py Fri Apr 07 15:18:39 2023 +0200 +++ b/sat/memory/memory.py Sat Apr 08 13:54:42 2023 +0200 @@ -65,13 +65,13 @@ self.timeout = timeout or Sessions.DEFAULT_TIMEOUT self.resettable_timeout = resettable_timeout - def newSession(self, session_data=None, session_id=None, profile=None): + def new_session(self, session_data=None, session_id=None, profile=None): """Create a new session @param session_data: mutable data to use, default to a dict @param session_id (str): force the session_id to the given string @param profile: if set, the session is owned by the profile, - and profileGet must be used instead of __getitem__ + and profile_get must be used instead of __getitem__ @return: session_id, session_data """ if session_id is None: @@ -80,7 +80,7 @@ raise exceptions.ConflictError( "Session id {} is already used".format(session_id) ) - timer = reactor.callLater(self.timeout, self._purgeSession, session_id) + timer = reactor.callLater(self.timeout, self._purge_session, session_id) if session_data is None: session_data = {} self._sessions[session_id] = ( @@ -88,7 +88,7 @@ ) return session_id, session_data - def _purgeSession(self, session_id): + def _purge_session(self, session_id): try: timer, session_data, profile = self._sessions[session_id] except ValueError: @@ -113,7 +113,7 @@ def __contains__(self, session_id): return session_id in self._sessions - def profileGet(self, session_id, profile): + def profile_get(self, session_id, profile): try: timer, session_data, profile_set = self._sessions[session_id] except ValueError: @@ -133,7 +133,7 @@ timer, session_data = self._sessions[session_id] except ValueError: raise exceptions.InternalError( - "You need to use profileGet instead of __getitem__ when profile is set" + "You need to use profile_get instead of __getitem__ when profile is set" ) except KeyError: raise failure.Failure(KeyError(MSG_NO_SESSION)) @@ -142,11 +142,11 @@ return session_data def __setitem__(self, key, value): - raise NotImplementedError("You need do use newSession to create a session") + raise NotImplementedError("You need do use new_session to create a session") def __delitem__(self, session_id): """ delete the session data """ - self._purgeSession(session_id) + self._purge_session(session_id) def keys(self): return list(self._sessions.keys()) @@ -160,7 +160,7 @@ used as the key to retrieve data or delete a session (instead of session id). """ - def _profileGetAllIds(self, profile): + def _profile_get_all_ids(self, profile): """Return a list of the sessions ids that are associated to the given profile. @param profile: %(doc_profile)s @@ -176,7 +176,7 @@ ret.append(session_id) return ret - def profileGetUnique(self, profile): + def profile_get_unique(self, profile): """Return the data of the unique session that is associated to the given profile. @param profile: %(doc_profile)s @@ -185,25 +185,25 @@ - None if no session is associated to the profile - raise an error if more than one session are found """ - ids = self._profileGetAllIds(profile) + ids = self._profile_get_all_ids(profile) if len(ids) > 1: raise exceptions.InternalError( - "profileGetUnique has been used but more than one session has been found!" + "profile_get_unique has been used but more than one session has been found!" 
) return ( - self.profileGet(ids[0], profile) if len(ids) == 1 else None + self.profile_get(ids[0], profile) if len(ids) == 1 else None ) # XXX: timeout might be reset - def profileDelUnique(self, profile): + def profile_del_unique(self, profile): """Delete the unique session that is associated to the given profile. @param profile: %(doc_profile)s @return: None, but raise an error if more than one session are found """ - ids = self._profileGetAllIds(profile) + ids = self._profile_get_all_ids(profile) if len(ids) > 1: raise exceptions.InternalError( - "profileDelUnique has been used but more than one session has been found!" + "profile_del_unique has been used but more than one session has been found!" ) if len(ids) == 1: del self._sessions[ids[0]] @@ -217,7 +217,7 @@ def __init__(self, timeout=None): ProfileSessions.__init__(self, timeout, resettable_timeout=False) - def _purgeSession(self, session_id): + def _purge_session(self, session_id): log.debug( "FIXME: PasswordSessions should ask for the profile password after the session expired" ) @@ -237,9 +237,9 @@ self.subscriptions = {} self.auth_sessions = PasswordSessions() # remember the authenticated profiles self.disco = Discovery(host) - self.config = tools_config.parseMainConf(log_filenames=True) - self._cache_path = Path(self.getConfig("", "local_dir"), C.CACHE_DIR) - self.admins = self.getConfig("", "admins_list", []) + self.config = tools_config.parse_main_conf(log_filenames=True) + self._cache_path = Path(self.config_get("", "local_dir"), C.CACHE_DIR) + self.admins = self.config_get("", "admins_list", []) self.admin_jids = set() @@ -256,7 +256,7 @@ await self.disco.load() for admin in self.admins: try: - admin_jid_s = await self.asyncGetParamA( + admin_jid_s = await self.param_get_a_async( "JabberID", "Connection", profile_key=admin ) except Exception as e: @@ -273,7 +273,7 @@ ## Configuration ## - def getConfig(self, section, name, default=None): + def config_get(self, section, name, default=None): """Get the main configuration option @param section: section of the config file (None or '' for DEFAULT) @@ -281,7 +281,7 @@ @param default: value to use if not found @return: str, list or dict """ - return tools_config.getConfig(self.config, section, name, default) + return tools_config.config_get(self.config, section, name, default) def load_xml(self, filename): """Load parameters template from xml file @@ -322,16 +322,16 @@ def load(self): """Load parameters and all memory things from db""" # parameters data - return self.params.loadGenParams() + return self.params.load_gen_params() - def loadIndividualParams(self, profile): + def load_individual_params(self, profile): """Load individual parameters for a profile @param profile: %(doc_profile)s""" - return self.params.loadIndParams(profile) + return self.params.load_ind_params(profile) ## Profiles/Sessions management ## - def startSession(self, password, profile): + def start_session(self, password, profile): """"Iniatialise session for a profile @param password(unicode): profile session password @@ -340,59 +340,59 @@ @raise exceptions.ProfileUnknownError if profile doesn't exists @raise exceptions.PasswordError: the password does not match """ - profile = self.getProfileName(profile) + profile = self.get_profile_name(profile) - def createSession(__): + def create_session(__): """Called once params are loaded.""" self._entities_cache[profile] = {} log.info("[{}] Profile session started".format(profile)) return False - def backendInitialised(__): - def doStartSession(__=None): - if 
self.isSessionStarted(profile): + def backend_initialised(__): + def do_start_session(__=None): + if self.is_session_started(profile): log.info("Session already started!") return True try: # if there is a value at this point in self._entities_cache, - # it is the loadIndividualParams Deferred, the session is starting + # it is the load_individual_params Deferred, the session is starting session_d = self._entities_cache[profile] except KeyError: # else we do request the params - session_d = self._entities_cache[profile] = self.loadIndividualParams( + session_d = self._entities_cache[profile] = self.load_individual_params( profile ) - session_d.addCallback(createSession) + session_d.addCallback(create_session) finally: return session_d - auth_d = defer.ensureDeferred(self.profileAuthenticate(password, profile)) - auth_d.addCallback(doStartSession) + auth_d = defer.ensureDeferred(self.profile_authenticate(password, profile)) + auth_d.addCallback(do_start_session) return auth_d if self.host.initialised.called: - return defer.succeed(None).addCallback(backendInitialised) + return defer.succeed(None).addCallback(backend_initialised) else: - return self.host.initialised.addCallback(backendInitialised) + return self.host.initialised.addCallback(backend_initialised) - def stopSession(self, profile): + def stop_session(self, profile): """Delete a profile session @param profile: %(doc_profile)s """ - if self.host.isConnected(profile): + if self.host.is_connected(profile): log.debug("Disconnecting profile because of session stop") self.host.disconnect(profile) - self.auth_sessions.profileDelUnique(profile) + self.auth_sessions.profile_del_unique(profile) try: self._entities_cache[profile] except KeyError: log.warning("Profile was not in cache") - def _isSessionStarted(self, profile_key): - return self.isSessionStarted(self.getProfileName(profile_key)) + def _is_session_started(self, profile_key): + return self.is_session_started(self.get_profile_name(profile_key)) - def isSessionStarted(self, profile): + def is_session_started(self, profile): try: # XXX: if the value in self._entities_cache is a Deferred, # the session is starting but not started yet @@ -400,20 +400,20 @@ except KeyError: return False - async def profileAuthenticate(self, password, profile): + async def profile_authenticate(self, password, profile): """Authenticate the profile. @param password (unicode): the SàT profile password @return: None in case of success (an exception is raised otherwise) @raise exceptions.PasswordError: the password does not match """ - if not password and self.auth_sessions.profileGetUnique(profile): + if not password and self.auth_sessions.profile_get_unique(profile): # XXX: this allows any frontend to connect with the empty password as soon as # the profile has been authenticated at least once before. It is OK as long as # submitting a form with empty passwords is restricted to local frontends. 
return - sat_cipher = await self.asyncGetParamA( + sat_cipher = await self.param_get_a_async( C.PROFILE_PASS_PATH[1], C.PROFILE_PASS_PATH[0], profile_key=profile ) valid = PasswordHasher.verify(password, sat_cipher) @@ -421,9 +421,9 @@ log.warning(_("Authentication failure of profile {profile}").format( profile=profile)) raise exceptions.PasswordError("The provided profile password doesn't match.") - return await self.newAuthSession(password, profile) + return await self.new_auth_session(password, profile) - async def newAuthSession(self, key, profile): + async def new_auth_session(self, key, profile): """Start a new session for the authenticated profile. If there is already an existing session, no new one is created @@ -435,18 +435,18 @@ data = await PersistentDict(C.MEMORY_CRYPTO_NAMESPACE, profile).load() personal_key = BlockCipher.decrypt(key, data[C.MEMORY_CRYPTO_KEY]) # Create the session for this profile and store the personal key - session_data = self.auth_sessions.profileGetUnique(profile) + session_data = self.auth_sessions.profile_get_unique(profile) if not session_data: - self.auth_sessions.newSession( + self.auth_sessions.new_session( {C.MEMORY_CRYPTO_KEY: personal_key}, profile=profile ) log.debug("auth session created for profile %s" % profile) - def purgeProfileSession(self, profile): + def purge_profile_session(self, profile): """Delete cache of data of profile @param profile: %(doc_profile)s""" log.info(_("[%s] Profile session purge" % profile)) - self.params.purgeProfile(profile) + self.params.purge_profile(profile) try: del self._entities_cache[profile] except KeyError: @@ -457,7 +457,7 @@ % profile ) - def getProfilesList(self, clients=True, components=False): + def get_profiles_list(self, clients=True, components=False): """retrieve profiles list @param clients(bool): if True return clients profiles @@ -467,18 +467,18 @@ if not clients and not components: log.warning(_("requesting no profiles at all")) return [] - profiles = self.storage.getProfilesList() + profiles = self.storage.get_profiles_list() if clients and components: return sorted(profiles) - isComponent = self.storage.profileIsComponent + is_component = self.storage.profile_is_component if clients: - p_filter = lambda p: not isComponent(p) + p_filter = lambda p: not is_component(p) else: - p_filter = lambda p: isComponent(p) + p_filter = lambda p: is_component(p) return sorted(p for p in profiles if p_filter(p)) - def getProfileName(self, profile_key, return_profile_keys=False): + def get_profile_name(self, profile_key, return_profile_keys=False): """Return name of profile from keyword @param profile_key: can be the profile name or a keyword (like @DEFAULT@) @@ -486,19 +486,19 @@ @return: requested profile name @raise exceptions.ProfileUnknownError if profile doesn't exists """ - return self.params.getProfileName(profile_key, return_profile_keys) + return self.params.get_profile_name(profile_key, return_profile_keys) - def profileSetDefault(self, profile): + def profile_set_default(self, profile): """Set default profile @param profile: %(doc_profile)s """ # we want to be sure that the profile exists - profile = self.getProfileName(profile) + profile = self.get_profile_name(profile) self.memory_data["Profile_default"] = profile - def createProfile(self, name, password, component=None): + def create_profile(self, name, password, component=None): """Create a new profile @param name(unicode): profile name @@ -532,40 +532,40 @@ # raise ValueError(_(u"Plugin {component} is not an entry point !".format( # 
component = component))) - d = self.params.createProfile(name, component) + d = self.params.create_profile(name, component) - def initPersonalKey(__): + def init_personal_key(__): # be sure to call this after checking that the profile doesn't exist yet # generated once for all and saved in a PersistentDict - personal_key = BlockCipher.getRandomKey( + personal_key = BlockCipher.get_random_key( base64=True ).decode('utf-8') - self.auth_sessions.newSession( + self.auth_sessions.new_session( {C.MEMORY_CRYPTO_KEY: personal_key}, profile=name - ) # will be encrypted by setParam + ) # will be encrypted by param_set - def startFakeSession(__): - # avoid ProfileNotConnected exception in setParam + def start_fake_session(__): + # avoid ProfileNotConnected exception in param_set self._entities_cache[name] = None - self.params.loadIndParams(name) + self.params.load_ind_params(name) - def stopFakeSession(__): + def stop_fake_session(__): del self._entities_cache[name] - self.params.purgeProfile(name) + self.params.purge_profile(name) - d.addCallback(initPersonalKey) - d.addCallback(startFakeSession) + d.addCallback(init_personal_key) + d.addCallback(start_fake_session) d.addCallback( - lambda __: self.setParam( + lambda __: self.param_set( C.PROFILE_PASS_PATH[1], password, C.PROFILE_PASS_PATH[0], profile_key=name ) ) - d.addCallback(stopFakeSession) - d.addCallback(lambda __: self.auth_sessions.profileDelUnique(name)) + d.addCallback(stop_fake_session) + d.addCallback(lambda __: self.auth_sessions.profile_del_unique(name)) return d - def asyncDeleteProfile(self, name, force=False): + def profile_delete_async(self, name, force=False): """Delete an existing profile @param name: Name of the profile @@ -574,55 +574,55 @@ @return: a Deferred instance """ - def cleanMemory(__): - self.auth_sessions.profileDelUnique(name) + def clean_memory(__): + self.auth_sessions.profile_del_unique(name) try: del self._entities_cache[name] except KeyError: pass - d = self.params.asyncDeleteProfile(name, force) - d.addCallback(cleanMemory) + d = self.params.profile_delete_async(name, force) + d.addCallback(clean_memory) return d - def isComponent(self, profile_name): + def is_component(self, profile_name): """Tell if a profile is a component @param profile_name(unicode): name of the profile @return (bool): True if profile is a component @raise exceptions.NotFound: profile doesn't exist """ - return self.storage.profileIsComponent(profile_name) + return self.storage.profile_is_component(profile_name) - def getEntryPoint(self, profile_name): + def get_entry_point(self, profile_name): """Get a component entry point @param profile_name(unicode): name of the profile @return (bool): True if profile is a component @raise exceptions.NotFound: profile doesn't exist """ - return self.storage.getEntryPoint(profile_name) + return self.storage.get_entry_point(profile_name) ## History ## - def addToHistory(self, client, data): - return self.storage.addToHistory(data, client.profile) + def add_to_history(self, client, data): + return self.storage.add_to_history(data, client.profile) - def _historyGetSerialise(self, history_data): + def _history_get_serialise(self, history_data): return [ (uid, timestamp, from_jid, to_jid, message, subject, mess_type, data_format.serialise(extra)) for uid, timestamp, from_jid, to_jid, message, subject, mess_type, extra in history_data ] - def _historyGet(self, from_jid_s, to_jid_s, limit=C.HISTORY_LIMIT_NONE, between=True, + def _history_get(self, from_jid_s, to_jid_s, limit=C.HISTORY_LIMIT_NONE, 
between=True, filters=None, profile=C.PROF_KEY_NONE): - d = self.historyGet(jid.JID(from_jid_s), jid.JID(to_jid_s), limit, between, + d = self.history_get(jid.JID(from_jid_s), jid.JID(to_jid_s), limit, between, filters, profile) - d.addCallback(self._historyGetSerialise) + d.addCallback(self._history_get_serialise) return d - def historyGet(self, from_jid, to_jid, limit=C.HISTORY_LIMIT_NONE, between=True, + def history_get(self, from_jid, to_jid, limit=C.HISTORY_LIMIT_NONE, between=True, filters=None, profile=C.PROF_KEY_NONE): """Retrieve messages in history @@ -636,31 +636,31 @@ @param filters (dict[unicode, unicode]): pattern to filter the history results (see bridge API for details) @param profile (str): %(doc_profile)s - @return (D(list)): list of message data as in [messageNew] + @return (D(list)): list of message data as in [message_new] """ assert profile != C.PROF_KEY_NONE if limit == C.HISTORY_LIMIT_DEFAULT: - limit = int(self.getParamA(C.HISTORY_LIMIT, "General", profile_key=profile)) + limit = int(self.param_get_a(C.HISTORY_LIMIT, "General", profile_key=profile)) elif limit == C.HISTORY_LIMIT_NONE: limit = None if limit == 0: return defer.succeed([]) - return self.storage.historyGet(from_jid, to_jid, limit, between, filters, profile) + return self.storage.history_get(from_jid, to_jid, limit, between, filters, profile) ## Statuses ## - def _getPresenceStatuses(self, profile_key): - ret = self.getPresenceStatuses(profile_key) + def _get_presence_statuses(self, profile_key): + ret = self.presence_statuses_get(profile_key) return {entity.full(): data for entity, data in ret.items()} - def getPresenceStatuses(self, profile_key): + def presence_statuses_get(self, profile_key): """Get all the presence statuses of a profile @param profile_key: %(doc_profile_key)s @return: presence data: key=entity JID, value=presence data for this entity """ - client = self.host.getClient(profile_key) - profile_cache = self._getProfileCache(client) + client = self.host.get_client(profile_key) + profile_cache = self._get_profile_cache(client) entities_presence = {} for entity_jid, entity_data in profile_cache.items(): @@ -668,7 +668,7 @@ full_jid = copy.copy(entity_jid) full_jid.resource = resource try: - presence_data = self.getEntityDatum(client, full_jid, "presence") + presence_data = self.get_entity_datum(client, full_jid, "presence") except KeyError: continue entities_presence.setdefault(entity_jid, {})[ @@ -677,7 +677,7 @@ return entities_presence - def setPresenceStatus(self, entity_jid, show, priority, statuses, profile_key): + def set_presence_status(self, entity_jid, show, priority, statuses, profile_key): """Change the presence status of an entity @param entity_jid: jid.JID of the entity @@ -686,26 +686,26 @@ @param statuses: dictionary of statuses @param profile_key: %(doc_profile_key)s """ - client = self.host.getClient(profile_key) + client = self.host.get_client(profile_key) presence_data = PresenceTuple(show, priority, statuses) - self.updateEntityData( + self.update_entity_data( client, entity_jid, "presence", presence_data ) if entity_jid.resource and show != C.PRESENCE_UNAVAILABLE: # If a resource is available, bare jid should not have presence information try: - self.delEntityDatum(client, entity_jid.userhostJID(), "presence") + self.del_entity_datum(client, entity_jid.userhostJID(), "presence") except (KeyError, exceptions.UnknownEntityError): pass ## Resources ## - def _getAllResource(self, jid_s, profile_key): - client = self.host.getClient(profile_key) + def 
_get_all_resource(self, jid_s, profile_key): + client = self.host.get_client(profile_key) jid_ = jid.JID(jid_s) - return self.getAllResources(client, jid_) + return self.get_all_resources(client, jid_) - def getAllResources(self, client, entity_jid): + def get_all_resources(self, client, entity_jid): """Return all resource from jid for which we have had data in this session @param entity_jid: bare jid of the entity @@ -717,9 +717,9 @@ # FIXME: is there a need to keep cache data for resources which are not connected anymore? if entity_jid.resource: raise ValueError( - "getAllResources must be used with a bare jid (got {})".format(entity_jid) + "get_all_resources must be used with a bare jid (got {})".format(entity_jid) ) - profile_cache = self._getProfileCache(client) + profile_cache = self._get_profile_cache(client) try: entity_data = profile_cache[entity_jid.userhostJID()] except KeyError: @@ -730,21 +730,21 @@ resources.discard(None) return resources - def getAvailableResources(self, client, entity_jid): + def get_available_resources(self, client, entity_jid): """Return available resource for entity_jid - This method differs from getAllResources by returning only available resources + This method differs from get_all_resources by returning only available resources @param entity_jid: bare jid of the entit return (list[unicode]): list of available resources @raise exceptions.UnknownEntityError: if entity is not in cache """ available = [] - for resource in self.getAllResources(client, entity_jid): + for resource in self.get_all_resources(client, entity_jid): full_jid = copy.copy(entity_jid) full_jid.resource = resource try: - presence_data = self.getEntityDatum(client, full_jid, "presence") + presence_data = self.get_entity_datum(client, full_jid, "presence") except KeyError: log.debug("Can't get presence data for {}".format(full_jid)) else: @@ -752,12 +752,12 @@ available.append(resource) return available - def _getMainResource(self, jid_s, profile_key): - client = self.host.getClient(profile_key) + def _get_main_resource(self, jid_s, profile_key): + client = self.host.get_client(profile_key) jid_ = jid.JID(jid_s) - return self.getMainResource(client, jid_) or "" + return self.main_resource_get(client, jid_) or "" - def getMainResource(self, client, entity_jid): + def main_resource_get(self, client, entity_jid): """Return the main resource used by an entity @param entity_jid: bare entity jid @@ -765,15 +765,15 @@ """ if entity_jid.resource: raise ValueError( - "getMainResource must be used with a bare jid (got {})".format(entity_jid) + "main_resource_get must be used with a bare jid (got {})".format(entity_jid) ) try: - if self.host.plugins["XEP-0045"].isJoinedRoom(client, entity_jid): + if self.host.plugins["XEP-0045"].is_joined_room(client, entity_jid): return None # MUC rooms have no main resource except KeyError: # plugin not found pass try: - resources = self.getAllResources(client, entity_jid) + resources = self.get_all_resources(client, entity_jid) except exceptions.UnknownEntityError: log.warning("Entity is not in cache, we can't find any resource") return None @@ -782,7 +782,7 @@ full_jid = copy.copy(entity_jid) full_jid.resource = resource try: - presence_data = self.getEntityDatum(client, full_jid, "presence") + presence_data = self.get_entity_datum(client, full_jid, "presence") except KeyError: log.debug("No presence information for {}".format(full_jid)) continue @@ -795,7 +795,7 @@ ## Entities data ## - def _getProfileCache(self, client): + def _get_profile_cache(self, 
client): """Check profile validity and return its cache @param client: SatXMPPClient @@ -803,7 +803,7 @@ """ return self._entities_cache[client.profile] - def setSignalOnUpdate(self, key, signal=True): + def set_signal_on_update(self, key, signal=True): """Set a signal flag on the key When the key will be updated, a signal will be sent to frontends @@ -815,13 +815,13 @@ else: self._key_signals.discard(key) - def getAllEntitiesIter(self, client, with_bare=False): + def get_all_entities_iter(self, client, with_bare=False): """Return an iterator of full jids of all entities in cache @param with_bare: if True, include bare jids @return (list[unicode]): list of jids """ - profile_cache = self._getProfileCache(client) + profile_cache = self._get_profile_cache(client) # we construct a list of all known full jids (bare jid of entities x resources) for bare_jid, entity_data in profile_cache.items(): for resource in entity_data.keys(): @@ -831,22 +831,22 @@ full_jid.resource = resource yield full_jid - def updateEntityData( + def update_entity_data( self, client, entity_jid, key, value, silent=False ): """Set a misc data for an entity - If key was registered with setSignalOnUpdate, a signal will be sent to frontends + If key was registered with set_signal_on_update, a signal will be sent to frontends @param entity_jid: JID of the entity, C.ENTITY_ALL_RESOURCES for all resources of all entities, C.ENTITY_ALL for all entities (all resources + bare jids) @param key: key to set (eg: C.ENTITY_TYPE) @param value: value for this key (eg: C.ENTITY_TYPE_MUC) @param silent(bool): if True, doesn't send signal to frontend, even if there is a - signal flag (see setSignalOnUpdate) + signal flag (see set_signal_on_update) """ - profile_cache = self._getProfileCache(client) + profile_cache = self._get_profile_cache(client) if entity_jid in (C.ENTITY_ALL_RESOURCES, C.ENTITY_ALL): - entities = self.getAllEntitiesIter(client, entity_jid == C.ENTITY_ALL) + entities = self.get_all_entities_iter(client, entity_jid == C.ENTITY_ALL) else: entities = (entity_jid,) @@ -857,14 +857,14 @@ entity_data[key] = value if key in self._key_signals and not silent: - self.host.bridge.entityDataUpdated( + self.host.bridge.entity_data_updated( jid_.full(), key, data_format.serialise(value), client.profile ) - def delEntityDatum(self, client, entity_jid, key): + def del_entity_datum(self, client, entity_jid, key): """Delete a data for an entity @param entity_jid: JID of the entity, C.ENTITY_ALL_RESOURCES for all resources of all entities, @@ -874,9 +874,9 @@ @raise exceptions.UnknownEntityError: if entity is not in cache @raise KeyError: key is not in cache """ - profile_cache = self._getProfileCache(client) + profile_cache = self._get_profile_cache(client) if entity_jid in (C.ENTITY_ALL_RESOURCES, C.ENTITY_ALL): - entities = self.getAllEntitiesIter(client, entity_jid == C.ENTITY_ALL) + entities = self.get_all_entities_iter(client, entity_jid == C.ENTITY_ALL) else: entities = (entity_jid,) @@ -895,9 +895,9 @@ else: raise e - def _getEntitiesData(self, entities_jids, keys_list, profile_key): - client = self.host.getClient(profile_key) - ret = self.getEntitiesData( + def _get_entities_data(self, entities_jids, keys_list, profile_key): + client = self.host.get_client(profile_key) + ret = self.entities_data_get( client, [jid.JID(jid_) for jid_ in entities_jids], keys_list ) return { @@ -905,7 +905,7 @@ for jid_, data in ret.items() } - def getEntitiesData(self, client, entities_jids, keys_list=None): + def entities_data_get(self, client, 
entities_jids, keys_list=None): """Get a list of cached values for several entities at once @param entities_jids: jids of the entities, or empty list for all entities in cache @@ -920,7 +920,7 @@ @raise exceptions.UnknownEntityError: if entity is not in cache """ - def fillEntityData(entity_cache_data): + def fill_entity_data(entity_cache_data): entity_data = {} if keys_list is None: entity_data = entity_cache_data @@ -932,7 +932,7 @@ continue return entity_data - profile_cache = self._getProfileCache(client) + profile_cache = self._get_profile_cache(client) ret_data = {} if entities_jids: for entity in entities_jids: @@ -942,21 +942,21 @@ ] except KeyError: continue - ret_data[entity.full()] = fillEntityData(entity_cache_data, keys_list) + ret_data[entity.full()] = fill_entity_data(entity_cache_data, keys_list) else: for bare_jid, data in profile_cache.items(): for resource, entity_cache_data in data.items(): full_jid = copy.copy(bare_jid) full_jid.resource = resource - ret_data[full_jid] = fillEntityData(entity_cache_data) + ret_data[full_jid] = fill_entity_data(entity_cache_data) return ret_data - def _getEntityData(self, entity_jid_s, keys_list=None, profile=C.PROF_KEY_NONE): - return self.getEntityData( - self.host.getClient(profile), jid.JID(entity_jid_s), keys_list) + def _get_entity_data(self, entity_jid_s, keys_list=None, profile=C.PROF_KEY_NONE): + return self.entity_data_get( + self.host.get_client(profile), jid.JID(entity_jid_s), keys_list) - def getEntityData(self, client, entity_jid, keys_list=None): + def entity_data_get(self, client, entity_jid, keys_list=None): """Get a list of cached values for entity @param entity_jid: JID of the entity @@ -968,7 +968,7 @@ @raise exceptions.UnknownEntityError: if entity is not in cache """ - profile_cache = self._getProfileCache(client) + profile_cache = self._get_profile_cache(client) try: entity_data = profile_cache[entity_jid.userhostJID()][entity_jid.resource] except KeyError: @@ -982,7 +982,7 @@ return {key: entity_data[key] for key in keys_list if key in entity_data} - def getEntityDatum(self, client, entity_jid, key): + def get_entity_datum(self, client, entity_jid, key): """Get a datum from entity @param entity_jid: JID of the entity @@ -992,9 +992,9 @@ @raise exceptions.UnknownEntityError: if entity is not in cache @raise KeyError: if there is no value for this key and this entity """ - return self.getEntityData(client, entity_jid, (key,))[key] + return self.entity_data_get(client, entity_jid, (key,))[key] - def delEntityCache( + def del_entity_cache( self, entity_jid, delete_all_resources=True, profile_key=C.PROF_KEY_NONE ): """Remove all cached data for entity @@ -1005,8 +1005,8 @@ @raise exceptions.UnknownEntityError: if entity is not in cache """ - client = self.host.getClient(profile_key) - profile_cache = self._getProfileCache(client) + client = self.host.get_client(profile_key) + profile_cache = self._get_profile_cache(client) if delete_all_resources: if entity_jid.resource: @@ -1027,7 +1027,7 @@ ## Encryption ## - def encryptValue(self, value, profile): + def encrypt_value(self, value, profile): """Encrypt a value for the given profile. The personal key must be loaded already in the profile session, that should be the case if the profile is already authenticated. 
@@ -1037,7 +1037,7 @@ @return: the deferred encrypted value """ try: - personal_key = self.auth_sessions.profileGetUnique(profile)[ + personal_key = self.auth_sessions.profile_get_unique(profile)[ C.MEMORY_CRYPTO_KEY ] except TypeError: @@ -1047,7 +1047,7 @@ ) return BlockCipher.encrypt(personal_key, value) - def decryptValue(self, value, profile): + def decrypt_value(self, value, profile): """Decrypt a value for the given profile. The personal key must be loaded already in the profile session, that should be the case if the profile is already authenticated. @@ -1057,7 +1057,7 @@ @return: the deferred decrypted value """ try: - personal_key = self.auth_sessions.profileGetUnique(profile)[ + personal_key = self.auth_sessions.profile_get_unique(profile)[ C.MEMORY_CRYPTO_KEY ] except TypeError: @@ -1067,7 +1067,7 @@ ) return BlockCipher.decrypt(personal_key, value) - def encryptPersonalData(self, data_key, data_value, crypto_key, profile): + def encrypt_personal_data(self, data_key, data_value, crypto_key, profile): """Re-encrypt a personal data (saved to a PersistentDict). @param data_key: key for the individual PersistentDict instance @@ -1077,7 +1077,7 @@ @return: a deferred None value """ - def gotIndMemory(data): + def got_ind_memory(data): data[data_key] = BlockCipher.encrypt(crypto_key, data_value) return data.force(data_key) @@ -1088,28 +1088,28 @@ ) d = PersistentDict(C.MEMORY_CRYPTO_NAMESPACE, profile).load() - return d.addCallback(gotIndMemory).addCallback(done) + return d.addCallback(got_ind_memory).addCallback(done) ## Subscription requests ## - def addWaitingSub(self, type_, entity_jid, profile_key): + def add_waiting_sub(self, type_, entity_jid, profile_key): """Called when a subcription request is received""" - profile = self.getProfileName(profile_key) + profile = self.get_profile_name(profile_key) assert profile if profile not in self.subscriptions: self.subscriptions[profile] = {} self.subscriptions[profile][entity_jid] = type_ - def delWaitingSub(self, entity_jid, profile_key): + def del_waiting_sub(self, entity_jid, profile_key): """Called when a subcription request is finished""" - profile = self.getProfileName(profile_key) + profile = self.get_profile_name(profile_key) assert profile if profile in self.subscriptions and entity_jid in self.subscriptions[profile]: del self.subscriptions[profile][entity_jid] - def getWaitingSub(self, profile_key): + def sub_waiting_get(self, profile_key): """Called to get a list of currently waiting subscription requests""" - profile = self.getProfileName(profile_key) + profile = self.get_profile_name(profile_key) if not profile: log.error(_("Asking waiting subscriptions for a non-existant profile")) return {} @@ -1120,13 +1120,13 @@ ## Parameters ## - def getStringParamA(self, name, category, attr="value", profile_key=C.PROF_KEY_NONE): - return self.params.getStringParamA(name, category, attr, profile_key) + def get_string_param_a(self, name, category, attr="value", profile_key=C.PROF_KEY_NONE): + return self.params.get_string_param_a(name, category, attr, profile_key) - def getParamA(self, name, category, attr="value", profile_key=C.PROF_KEY_NONE): - return self.params.getParamA(name, category, attr, profile_key=profile_key) + def param_get_a(self, name, category, attr="value", profile_key=C.PROF_KEY_NONE): + return self.params.param_get_a(name, category, attr, profile_key=profile_key) - def asyncGetParamA( + def param_get_a_async( self, name, category, @@ -1134,33 +1134,33 @@ security_limit=C.NO_SECURITY_LIMIT, 
profile_key=C.PROF_KEY_NONE, ): - return self.params.asyncGetParamA( + return self.params.param_get_a_async( name, category, attr, security_limit, profile_key ) - def _getParamsValuesFromCategory( + def _get_params_values_from_category( self, category, security_limit, app, extra_s, profile_key ): - return self.params._getParamsValuesFromCategory( + return self.params._get_params_values_from_category( category, security_limit, app, extra_s, profile_key ) - def asyncGetStringParamA( + def async_get_string_param_a( self, name, category, attribute="value", security_limit=C.NO_SECURITY_LIMIT, profile_key=C.PROF_KEY_NONE): - profile = self.getProfileName(profile_key) - return defer.ensureDeferred(self.params.asyncGetStringParamA( + profile = self.get_profile_name(profile_key) + return defer.ensureDeferred(self.params.async_get_string_param_a( name, category, attribute, security_limit, profile )) - def _getParamsUI(self, security_limit, app, extra_s, profile_key): - return self.params._getParamsUI(security_limit, app, extra_s, profile_key) + def _get_params_ui(self, security_limit, app, extra_s, profile_key): + return self.params._get_params_ui(security_limit, app, extra_s, profile_key) - def getParamsCategories(self): - return self.params.getParamsCategories() + def params_categories_get(self): + return self.params.params_categories_get() - def setParam( + def param_set( self, name, value, @@ -1168,43 +1168,43 @@ security_limit=C.NO_SECURITY_LIMIT, profile_key=C.PROF_KEY_NONE, ): - return self.params.setParam(name, value, category, security_limit, profile_key) + return self.params.param_set(name, value, category, security_limit, profile_key) - def updateParams(self, xml): - return self.params.updateParams(xml) + def update_params(self, xml): + return self.params.update_params(xml) - def paramsRegisterApp(self, xml, security_limit=C.NO_SECURITY_LIMIT, app=""): - return self.params.paramsRegisterApp(xml, security_limit, app) + def params_register_app(self, xml, security_limit=C.NO_SECURITY_LIMIT, app=""): + return self.params.params_register_app(xml, security_limit, app) - def setDefault(self, name, category, callback, errback=None): - return self.params.setDefault(name, category, callback, errback) + def set_default(self, name, category, callback, errback=None): + return self.params.set_default(name, category, callback, errback) ## Private Data ## - def _privateDataSet(self, namespace, key, data_s, profile_key): - client = self.host.getClient(profile_key) + def _private_data_set(self, namespace, key, data_s, profile_key): + client = self.host.get_client(profile_key) # we accept any type data = data_format.deserialise(data_s, type_check=None) - return defer.ensureDeferred(self.storage.setPrivateValue( + return defer.ensureDeferred(self.storage.set_private_value( namespace, key, data, binary=True, profile=client.profile)) - def _privateDataGet(self, namespace, key, profile_key): - client = self.host.getClient(profile_key) + def _private_data_get(self, namespace, key, profile_key): + client = self.host.get_client(profile_key) d = defer.ensureDeferred( - self.storage.getPrivates( + self.storage.get_privates( namespace, [key], binary=True, profile=client.profile) ) d.addCallback(lambda data_dict: data_format.serialise(data_dict.get(key))) return d - def _privateDataDelete(self, namespace, key, profile_key): - client = self.host.getClient(profile_key) - return defer.ensureDeferred(self.storage.delPrivateValue( + def _private_data_delete(self, namespace, key, profile_key): + client = 
self.host.get_client(profile_key) + return defer.ensureDeferred(self.storage.del_private_value( namespace, key, binary=True, profile=client.profile)) ## Files ## - def checkFilePermission( + def check_file_permission( self, file_data: dict, peer_jid: Optional[jid.JID], @@ -1213,7 +1213,7 @@ ) -> None: """Check that an entity has the right permission on a file - @param file_data: data of one file, as returned by getFiles + @param file_data: data of one file, as returned by get_files @param peer_jid: entity trying to access the file @param perms_to_check: permissions to check tuple of C.ACCESS_PERM_* @@ -1268,15 +1268,15 @@ _("unknown access type: {type}").format(type=perm_type) ) - async def checkPermissionToRoot(self, client, file_data, peer_jid, perms_to_check): - """do checkFilePermission on file_data and all its parents until root""" + async def check_permission_to_root(self, client, file_data, peer_jid, perms_to_check): + """do check_file_permission on file_data and all its parents until root""" current = file_data while True: - self.checkFilePermission(current, peer_jid, perms_to_check) + self.check_file_permission(current, peer_jid, perms_to_check) parent = current["parent"] if not parent: break - files_data = await self.getFiles( + files_data = await self.get_files( client, peer_jid=None, file_id=parent, perms_to_check=None ) try: @@ -1284,7 +1284,7 @@ except IndexError: raise exceptions.DataError("Missing parent") - async def _getParentDir( + async def _get_parent_dir( self, client, path, parent, namespace, owner, peer_jid, perms_to_check ): """Retrieve parent node from a path, or last existing directory @@ -1308,7 +1308,7 @@ # non existing directories will be created parent = "" for idx, path_elt in enumerate(path_elts): - directories = await self.storage.getFiles( + directories = await self.storage.get_files( client, parent=parent, type_=C.FILE_TYPE_DIRECTORY, @@ -1325,11 +1325,11 @@ ) else: directory = directories[0] - self.checkFilePermission(directory, peer_jid, perms_to_check) + self.check_file_permission(directory, peer_jid, perms_to_check) parent = directory["id"] return (parent, []) - def getFileAffiliations(self, file_data: dict) -> Dict[jid.JID, str]: + def get_file_affiliations(self, file_data: dict) -> Dict[jid.JID, str]: """Convert file access to pubsub like affiliations""" affiliations = {} access_data = file_data['access'] @@ -1352,7 +1352,7 @@ return affiliations - def _setFileAffiliationsUpdate( + def _set_file_affiliations_update( self, access: dict, file_data: dict, @@ -1401,7 +1401,7 @@ else: raise ValueError(f"unknown affiliation: {affiliation!r}") - async def setFileAffiliations( + async def set_file_affiliations( self, client, file_data: dict, @@ -1417,17 +1417,17 @@ - "none" removes both read and write permissions """ file_id = file_data['id'] - await self.fileUpdate( + await self.file_update( file_id, 'access', update_cb=partial( - self._setFileAffiliationsUpdate, + self._set_file_affiliations_update, file_data=file_data, affiliations=affiliations ), ) - def _setFileAccessModelUpdate( + def _set_file_access_model_update( self, access: dict, file_data: dict, @@ -1445,7 +1445,7 @@ if requested_type == C.ACCESS_TYPE_WHITELIST and 'jids' not in read_data: read_data['jids'] = [] - async def setFileAccessModel( + async def set_file_access_model( self, client, file_data: dict, @@ -1458,17 +1458,17 @@ - "whitelist": set whitelist to file/dir """ file_id = file_data['id'] - await self.fileUpdate( + await self.file_update( file_id, 'access', update_cb=partial( - 
self._setFileAccessModelUpdate, + self._set_file_access_model_update, file_data=file_data, access_model=access_model ), ) - def getFilesOwner( + def get_files_owner( self, client, owner: Optional[jid.JID], @@ -1499,7 +1499,7 @@ ) return peer_jid.userhostJID() - async def getFiles( + async def get_files( self, client, peer_jid, file_id=None, version=None, parent=None, path=None, type_=None, file_hash=None, hash_algo=None, name=None, namespace=None, mime_type=None, public_id=None, owner=None, access=None, projection=None, @@ -1526,7 +1526,7 @@ @param mime_type(unicode, None): filter on this mime type @param public_id(unicode, None): filter on this public id @param owner(jid.JID, None): if not None, only get files from this owner - @param access(dict, None): get file with given access (see [setFile]) + @param access(dict, None): get file with given access (see [set_file]) @param projection(list[unicode], None): name of columns to retrieve None to retrieve all @param unique(bool): if True will remove duplicates @@ -1534,7 +1534,7 @@ must be a tuple of C.ACCESS_PERM_* or None if None, permission will no be checked (peer_jid must be None too in this case) - other params are the same as for [setFile] + other params are the same as for [set_file] @return (list[dict]): files corresponding to filters @raise exceptions.NotFound: parent directory not found (when path is specified) @raise exceptions.PermissionError: peer_jid can't use perms_to_check for one of @@ -1546,11 +1546,11 @@ "if you want to disable permission check, both peer_jid and " "perms_to_check must be None" ) - owner = self.getFilesOwner(client, owner, peer_jid, file_id, parent) + owner = self.get_files_owner(client, owner, peer_jid, file_id, parent) if path is not None: path = str(path) - # permission are checked by _getParentDir - parent, remaining_path_elts = await self._getParentDir( + # permission are checked by _get_parent_dir + parent, remaining_path_elts = await self._get_parent_dir( client, path, parent, namespace, owner, peer_jid, perms_to_check ) if remaining_path_elts: @@ -1560,16 +1560,16 @@ if parent and peer_jid: # if parent is given directly and permission check is requested, # we need to check all the parents - parent_data = await self.storage.getFiles(client, file_id=parent) + parent_data = await self.storage.get_files(client, file_id=parent) try: parent_data = parent_data[0] except IndexError: raise exceptions.DataError("mising parent") - await self.checkPermissionToRoot( + await self.check_permission_to_root( client, parent_data, peer_jid, perms_to_check ) - files = await self.storage.getFiles( + files = await self.storage.get_files( client, file_id=file_id, version=version, @@ -1592,7 +1592,7 @@ to_remove = [] for file_data in files: try: - self.checkFilePermission( + self.check_file_permission( file_data, peer_jid, perms_to_check, set_affiliation=True ) except exceptions.PermissionError: @@ -1601,7 +1601,7 @@ files.remove(file_data) return files - async def setFile( + async def set_file( self, client, name, file_id=None, version="", parent=None, path=None, type_=C.FILE_TYPE_FILE, file_hash=None, hash_algo=None, size=None, namespace=None, mime_type=None, public_id=None, created=None, modified=None, @@ -1678,18 +1678,18 @@ raise ValueError( "version, file_hash, size and mime_type can't be set for a directory" ) - owner = self.getFilesOwner(client, owner, peer_jid, file_id, parent) + owner = self.get_files_owner(client, owner, peer_jid, file_id, parent) if path is not None: path = str(path) - # _getParentDir will 
check permissions if peer_jid is set, so we use owner - parent, remaining_path_elts = await self._getParentDir( + # _get_parent_dir will check permissions if peer_jid is set, so we use owner + parent, remaining_path_elts = await self._get_parent_dir( client, path, parent, namespace, owner, owner, perms_to_check ) # if remaining directories don't exist, we have to create them for new_dir in remaining_path_elts: new_dir_id = shortuuid.uuid() - await self.storage.setFile( + await self.storage.set_file( client, name=new_dir, file_id=new_dir_id, @@ -1706,7 +1706,7 @@ elif parent is None: parent = "" - await self.storage.setFile( + await self.storage.set_file( client, file_id=file_id, version=version, @@ -1726,7 +1726,7 @@ extra=extra, ) - async def fileGetUsedSpace( + async def file_get_used_space( self, client, peer_jid: jid.JID, @@ -1736,15 +1736,15 @@ @param peer_jid: entity requesting the size @param owner: entity owning the file to check. If None, will be determined by - getFilesOwner + get_files_owner @return: size of total space used by files of this owner """ - owner = self.getFilesOwner(client, owner, peer_jid) + owner = self.get_files_owner(client, owner, peer_jid) if peer_jid.userhostJID() != owner and client.profile not in self.admins: raise exceptions.PermissionError("You are not allowed to check this size") - return await self.storage.fileGetUsedSpace(client, owner) + return await self.storage.file_get_used_space(client, owner) - def fileUpdate(self, file_id, column, update_cb): + def file_update(self, file_id, column, update_cb): """Update a file column taking care of race condition access is NOT checked in this method, it must be checked beforehand @@ -1754,10 +1754,10 @@ the method will take older value as argument, and must update it in place Note that the callable must be thread-safe """ - return self.storage.fileUpdate(file_id, column, update_cb) + return self.storage.file_update(file_id, column, update_cb) @defer.inlineCallbacks - def _deleteFile( + def _delete_file( self, client, peer_jid: jid.JID, @@ -1778,7 +1778,7 @@ "file {file_name} can't be deleted, {peer_jid} is not the owner" .format(file_name=file_data['name'], peer_jid=peer_jid.full())) if file_data['type'] == C.FILE_TYPE_DIRECTORY: - sub_files = yield self.getFiles(client, peer_jid, parent=file_data['id']) + sub_files = yield self.get_files(client, peer_jid, parent=file_data['id']) if sub_files and not recursive: raise exceptions.DataError(_("Can't delete directory, it is not empty")) # we first delete the sub-files @@ -1787,15 +1787,15 @@ sub_file_path = files_path / sub_file_data['name'] else: sub_file_path = files_path - yield self._deleteFile( + yield self._delete_file( client, peer_jid, recursive, sub_file_path, sub_file_data) # then the directory itself - yield self.storage.fileDelete(file_data['id']) + yield self.storage.file_delete(file_data['id']) elif file_data['type'] == C.FILE_TYPE_FILE: log.info(_("deleting file {name} with hash {file_hash}").format( name=file_data['name'], file_hash=file_data['file_hash'])) - yield self.storage.fileDelete(file_data['id']) - references = yield self.getFiles( + yield self.storage.file_delete(file_data['id']) + references = yield self.get_files( client, peer_jid, file_hash=file_data['file_hash']) if references: log.debug("there are still references to the file, we keep it") @@ -1811,7 +1811,7 @@ raise exceptions.InternalError('Unexpected file type: {file_type}' .format(file_type=file_data['type'])) - async def fileDelete(self, client, peer_jid, file_id, 
recursive=False): + async def file_delete(self, client, peer_jid, file_id, recursive=False): """Delete a single file or a directory and all its sub-files @param file_id(unicode): id of the file to delete @@ -1821,7 +1821,7 @@ """ # FIXME: we only allow owner of file to delete files for now, but WRITE access # should be checked too - files_data = await self.getFiles(client, peer_jid, file_id) + files_data = await self.get_files(client, peer_jid, file_id) if not files_data: raise exceptions.NotFound("Can't find the file with id {file_id}".format( file_id=file_id)) @@ -1829,11 +1829,11 @@ if file_data["type"] != C.FILE_TYPE_DIRECTORY and recursive: raise ValueError("recursive can only be set for directories") files_path = self.host.get_local_path(None, C.FILES_DIR) - await self._deleteFile(client, peer_jid, recursive, files_path, file_data) + await self._delete_file(client, peer_jid, recursive, files_path, file_data) ## Cache ## - def getCachePath(self, namespace: str, *args: str) -> Path: + def get_cache_path(self, namespace: str, *args: str) -> Path: """Get path to use to get a common path for a namespace This can be used by plugins to manage permanent data. It's the responsability @@ -1844,13 +1844,13 @@ namespace = namespace.strip().lower() return Path( self._cache_path, - regex.pathEscape(namespace), - *(regex.pathEscape(a) for a in args) + regex.path_escape(namespace), + *(regex.path_escape(a) for a in args) ) ## Misc ## - def isEntityAvailable(self, client, entity_jid): + def is_entity_available(self, client, entity_jid): """Tell from the presence information if the given entity is available. @param entity_jid (JID): the entity to check (if bare jid is used, all resources are tested) @@ -1858,20 +1858,20 @@ """ if not entity_jid.resource: return bool( - self.getAvailableResources(client, entity_jid) + self.get_available_resources(client, entity_jid) ) # is any resource is available, entity is available try: - presence_data = self.getEntityDatum(client, entity_jid, "presence") + presence_data = self.get_entity_datum(client, entity_jid, "presence") except KeyError: log.debug("No presence information for {}".format(entity_jid)) return False return presence_data.show != C.PRESENCE_UNAVAILABLE - def isAdmin(self, profile: str) -> bool: + def is_admin(self, profile: str) -> bool: """Tell if given profile has administrator privileges""" return profile in self.admins - def isAdminJID(self, entity: jid.JID) -> bool: + def is_admin_jid(self, entity: jid.JID) -> bool: """Tells if an entity jid correspond to an admin one It is sometime not possible to use the profile alone to check if an entity is an