Mercurial > libervia-backend
diff sat/plugins/plugin_xep_0198.py @ 4037:524856bd7b19
massive refactoring to switch from camelCase to snake_case:
historically, Libervia (formerly SàT) used camelCase, as allowed by PEP8 when working with
pre-PEP8 code, in order to match the coding style used in Twisted.
However, snake_case is more readable, and it is better to follow PEP8 best practices, so it
has been decided to move to full snake_case. Because Libervia has a huge codebase, this
ended up with an ugly mix of camelCase and snake_case.
To fix that, this patch does a big refactoring by renaming every function and method
(including bridge) that does not come from Twisted or Wokkel, to use snake_case throughout.
This is a massive change, and may result in some bugs.
author | Goffi <goffi@goffi.org> |
---|---|
date | Sat, 08 Apr 2023 13:54:42 +0200 |
parents | 32d714a8ea51 |
children |
line wrap: on
line diff
--- a/sat/plugins/plugin_xep_0198.py Fri Apr 07 15:18:39 2023 +0200 +++ b/sat/plugins/plugin_xep_0198.py Sat Apr 08 13:54:42 2023 +0200 @@ -123,7 +123,7 @@ self.req_timer.cancel() self.req_timer = None - def getBufferCopy(self): + def get_buffer_copy(self): return list(self.buffer) @@ -133,13 +133,13 @@ def __init__(self, host): log.info(_("Plugin Stream Management initialization")) self.host = host - host.registerNamespace('sm', NS_SM) - host.trigger.add("stream_hooks", self.addHooks) - host.trigger.add("xml_init", self._XMLInitTrigger) - host.trigger.add("disconnecting", self._disconnectingTrigger) - host.trigger.add("disconnected", self._disconnectedTrigger) + host.register_namespace('sm', NS_SM) + host.trigger.add("stream_hooks", self.add_hooks) + host.trigger.add("xml_init", self._xml_init_trigger) + host.trigger.add("disconnecting", self._disconnecting_trigger) + host.trigger.add("disconnected", self._disconnected_trigger) try: - self._ack_timeout = int(host.memory.getConfig("", "ack_timeout", ACK_TIMEOUT)) + self._ack_timeout = int(host.memory.config_get("", "ack_timeout", ACK_TIMEOUT)) except ValueError: log.error(_("Invalid ack_timeout value, please check your configuration")) self._ack_timeout = ACK_TIMEOUT @@ -149,20 +149,20 @@ log.info(_("Ack timeout set to {timeout}s").format( timeout=self._ack_timeout)) - def profileConnecting(self, client): - client._xep_0198_session = ProfileSessionData(callback=self.checkAcks, + def profile_connecting(self, client): + client._xep_0198_session = ProfileSessionData(callback=self.check_acks, client=client) - def getHandler(self, client): + def get_handler(self, client): return XEP_0198_handler(self) - def addHooks(self, client, receive_hooks, send_hooks): + def add_hooks(self, client, receive_hooks, send_hooks): """Add hooks to handle in/out stanzas counters""" - receive_hooks.append(partial(self.onReceive, client=client)) - send_hooks.append(partial(self.onSend, client=client)) + 
receive_hooks.append(partial(self.on_receive, client=client)) + send_hooks.append(partial(self.on_send, client=client)) return True - def _XMLInitTrigger(self, client): + def _xml_init_trigger(self, client): """Enable or resume a stream mangement""" if not (NS_SM, 'sm') in client.xmlstream.features: log.warning(_( @@ -201,16 +201,16 @@ session.enabled = True return True - def _disconnectingTrigger(self, client): + def _disconnecting_trigger(self, client): session = client._xep_0198_session if session.enabled: - self.sendAck(client) + self.send_ack(client) # This is a requested disconnection, so we can reset the session # to disable resuming and close normally the stream session.reset() return True - def _disconnectedTrigger(self, client, reason): + def _disconnected_trigger(self, client, reason): if client.is_component: return True session = client._xep_0198_session @@ -218,29 +218,29 @@ if session.resume_enabled: session.disconnected_time = time.time() session.disconnect_timer = reactor.callLater(session.session_max, - client.disconnectProfile, + client.disconnect_profile, reason) - # disconnectProfile must not be called at this point + # disconnect_profile must not be called at this point # because session can be resumed return False else: return True - def checkAcks(self, client): + def check_acks(self, client): """Request ack if needed""" session = client._xep_0198_session - # log.debug("checkAcks (in_counter={}, out_counter={}, buf len={}, buf idx={})" + # log.debug("check_acks (in_counter={}, out_counter={}, buf len={}, buf idx={})" # .format(session.in_counter, session.out_counter, len(session.buffer), # session.buffer_idx)) if session.ack_requested or not session.buffer: return if (session.out_counter - session.buffer_idx >= MAX_STANZA_ACK_R or time.time() - session.last_ack_r >= MAX_DELAY_ACK_R): - self.requestAck(client) + self.request_ack(client) session.ack_requested = True session.last_ack_r = time.time() - def updateBuffer(self, session, 
server_acked): + def update_buffer(self, session, server_acked): """Update buffer and buffer_index""" if server_acked > session.buffer_idx: diff = server_acked - session.buffer_idx @@ -257,7 +257,7 @@ buffer_id=session.buffer_idx)) session.buffer_idx += diff - def replayBuffer(self, client, buffer_, discard_results=False): + def replay_buffer(self, client, buffer_, discard_results=False): """Resend all stanza in buffer @param buffer_(collection.deque, list): buffer to replay @@ -276,13 +276,13 @@ continue client.send(stanza) - def sendAck(self, client): + def send_ack(self, client): """Send an answer element with current IN counter""" a_elt = domish.Element((NS_SM, 'a')) a_elt['h'] = str(client._xep_0198_session.in_counter) client.send(a_elt) - def requestAck(self, client): + def request_ack(self, client): """Send a request element""" session = client._xep_0198_session r_elt = domish.Element((NS_SM, 'r')) @@ -290,7 +290,7 @@ if session.req_timer is not None: raise exceptions.InternalError("req_timer should not be set") if self._ack_timeout: - session.req_timer = reactor.callLater(self._ack_timeout, self.onAckTimeOut, + session.req_timer = reactor.callLater(self._ack_timeout, self.on_ack_time_out, client) def _connectionFailed(self, failure_, connector): @@ -306,7 +306,7 @@ del connector.connectionFailed_ori return connector.connectionFailed(failure_) - def onEnabled(self, enabled_elt, client): + def on_enabled(self, enabled_elt, client): session = client._xep_0198_session session.in_counter = 0 @@ -367,25 +367,25 @@ .format(res_m = max_s/60))) session.session_max = max_s - def onResumed(self, enabled_elt, client): + def on_resumed(self, enabled_elt, client): session = client._xep_0198_session assert not session.enabled del session.resuming server_acked = int(enabled_elt['h']) - self.updateBuffer(session, server_acked) + self.update_buffer(session, server_acked) resend_count = len(session.buffer) # we resend all stanza which have not been received properly - 
self.replayBuffer(client, session.buffer) + self.replay_buffer(client, session.buffer) # now we can continue the session session.enabled = True d_time = time.time() - session.disconnected_time log.info(_("Stream session resumed (disconnected for {d_time} s, {count} " "stanza(s) resent)").format(d_time=int(d_time), count=resend_count)) - def onFailed(self, failed_elt, client): + def on_failed(self, failed_elt, client): session = client._xep_0198_session condition_elt = failed_elt.firstChildElement() - buffer_ = session.getBufferCopy() + buffer_ = session.get_buffer_copy() session.reset() try: @@ -429,7 +429,7 @@ if plg_0045 is not None: # we have to remove joined rooms - muc_join_args = plg_0045.popRooms(client) + muc_join_args = plg_0045.pop_rooms(client) # we need to recreate roster client.handlers.remove(client.roster) client.roster = client.roster.__class__(self.host) @@ -441,9 +441,9 @@ # we set the jid, which may have changed d.addCallback(lambda __: setattr(client.factory.authenticator, "jid", client.jid)) # we call the trigger who will send the <enable/> element - d.addCallback(lambda __: self._XMLInitTrigger(client)) + d.addCallback(lambda __: self._xml_init_trigger(client)) # then we have to re-request the roster, as changes may have occured - d.addCallback(lambda __: client.roster.requestRoster()) + d.addCallback(lambda __: client.roster.request_roster()) # we add got_roster to be sure to have roster before sending initial presence d.addCallback(lambda __: client.roster.got_roster) if plg_0313 is not None: @@ -460,16 +460,16 @@ d.addCallback(lambda __: muc_d_list) # at the end we replay the buffer, as those stanzas have probably not # been received - d.addCallback(lambda __: self.replayBuffer(client, buffer_, + d.addCallback(lambda __: self.replay_buffer(client, buffer_, discard_results=True)) - def onReceive(self, element, client): + def on_receive(self, element, client): if not client.is_component: session = client._xep_0198_session if session.enabled 
and element.name.lower() in C.STANZA_NAMES: session.in_counter += 1 % MAX_COUNTER - def onSend(self, obj, client): + def on_send(self, obj, client): if not client.is_component: session = client._xep_0198_session if (session.enabled @@ -477,12 +477,12 @@ and obj.name.lower() in C.STANZA_NAMES): session.out_counter += 1 % MAX_COUNTER session.buffer.appendleft(obj) - self.checkAcks(client) + self.check_acks(client) - def onAckRequest(self, r_elt, client): - self.sendAck(client) + def on_ack_request(self, r_elt, client): + self.send_ack(client) - def onAckAnswer(self, a_elt, client): + def on_ack_answer(self, a_elt, client): session = client._xep_0198_session session.ack_requested = False if self._ack_timeout: @@ -505,10 +505,10 @@ session.reset() return - self.updateBuffer(session, server_acked) - self.checkAcks(client) + self.update_buffer(session, server_acked) + self.check_acks(client) - def onAckTimeOut(self, client): + def on_ack_time_out(self, client): """Called when a requested ACK has not been received in time""" log.info(_("Ack was not received in time, aborting connection")) try: @@ -533,19 +533,19 @@ def connectionInitialized(self): self.xmlstream.addObserver( - SM_ENABLED, self.plugin_parent.onEnabled, client=self.parent + SM_ENABLED, self.plugin_parent.on_enabled, client=self.parent ) self.xmlstream.addObserver( - SM_RESUMED, self.plugin_parent.onResumed, client=self.parent + SM_RESUMED, self.plugin_parent.on_resumed, client=self.parent ) self.xmlstream.addObserver( - SM_FAILED, self.plugin_parent.onFailed, client=self.parent + SM_FAILED, self.plugin_parent.on_failed, client=self.parent ) self.xmlstream.addObserver( - SM_R_REQUEST, self.plugin_parent.onAckRequest, client=self.parent + SM_R_REQUEST, self.plugin_parent.on_ack_request, client=self.parent ) self.xmlstream.addObserver( - SM_A_REQUEST, self.plugin_parent.onAckAnswer, client=self.parent + SM_A_REQUEST, self.plugin_parent.on_ack_answer, client=self.parent ) def getDiscoInfo(self, requestor, 
target, nodeIdentifier=""):