diff sat/core/xmpp.py @ 3153:2c7b42f53e9a

core (xmpp): avoid starting several clients at the same time: a check is now done to prevent startConnection from running several times concurrently, which would lead to the creation of several SatXMPPEntity instances at the same time and result in many issues. If startConnection is called while a previous call has not yet finished, a CancelError is now raised.
author Goffi <goffi@goffi.org>
date Mon, 03 Feb 2020 13:46:24 +0100
parents f3700175c6a3
children b5c058c7692e
line wrap: on
line diff
--- a/sat/core/xmpp.py	Mon Feb 03 13:42:43 2020 +0100
+++ b/sat/core/xmpp.py	Mon Feb 03 13:46:24 2020 +0100
@@ -56,8 +56,10 @@
 ROSTER_VER_KEY = "@version@"
 
 
-class SatXMPPEntity(object):
+class SatXMPPEntity:
     """Common code for Client and Component"""
+    # profile is added there when startConnection begins and removed when it is finished
+    profiles_connecting = set()
 
     def __init__(self, host_app, profile, max_retries):
         factory = self.factory
@@ -154,6 +156,9 @@
         #        (e.g. adding subprotocols)
         #        but client should not be deleted except if session is finished
         #        (independently of connection/deconnection)
+        if profile in cls.profiles_connecting:
+            raise exceptions.CancelError(f"{profile} is already being connected")
+        cls.profiles_connecting.add(profile)
         try:
             port = int(
                 host.memory.getParamA(
@@ -169,9 +174,6 @@
         password = yield host.memory.asyncGetParamA(
             "Password", "Connection", profile_key=profile
         )
-        if profile in host.profiles:
-            raise exceptions.InternalError(
-                f"There is already a profile of name {profile} in host")
 
         entity_jid_s = yield host.memory.asyncGetParamA(
             "JabberID", "Connection", profile_key=profile)
@@ -195,6 +197,9 @@
                 resource=resource))
             entity_jid.resource = resource
 
+        if profile in host.profiles:
+            raise exceptions.InternalError(
+                f"There is already a profile of name {profile} in host")
         entity = host.profiles[profile] = cls(
             host, profile, entity_jid, password,
             host.memory.getParamA(C.FORCE_SERVER_PARAM, "Connection",
@@ -249,6 +254,8 @@
         )  # FIXME: we should have a timeout here, and a way to know if a plugin freeze
         # TODO: mesure launch time of each plugin
 
+        cls.profiles_connecting.remove(profile)
+
     def _disconnectionCb(self, __):
         self._connected_d = None
 
@@ -784,7 +791,11 @@
     @classmethod
     @defer.inlineCallbacks
     def startConnection(cls, host, profile, max_retries):
-        yield super(SatXMPPClient, cls).startConnection(host, profile, max_retries)
+        try:
+            yield super(SatXMPPClient, cls).startConnection(host, profile, max_retries)
+        except exceptions.CancelError as e:
+            log.warning(f"startConnection cancelled: {e}")
+            return
         entity = host.profiles[profile]
         # we finally send our presence
         entity.presence.available()