comparison sat/memory/disco.py @ 2624:56f94936df1e

code style reformatting using black
author Goffi <goffi@goffi.org>
date Wed, 27 Jun 2018 20:14:46 +0200
parents e70023e84974
children c3f59c1dcb0a
comparison
2623:49533de4540b vs 2624:56f94936df1e
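The changes below are purely mechanical: black normalizes string literals to double quotes, adjusts blank lines and inline-comment spacing, and re-wraps statements that exceed its default 88-character line length. The exact invocation used for this changeset is not recorded on this page (presumably something like "black sat/memory/disco.py" on the command line). As a rough sketch, and assuming a recent black release that exposes format_str and FileMode in its Python API, the same kind of rewrite can be reproduced on lines affected by this diff:

# Sketch only, not part of the changeset: run black programmatically on two
# statements touched by this diff and observe the quote normalization and
# line wrapping it applies.
import black

src = (
    "CAP_HASH_ERROR = 'ERROR'\n"
    "byte_features = [service.encode('utf-8') for service in services"
    " if isinstance(service, disco.DiscoFeature)]\n"
)
# format_str parses and reprints the source without executing it, so the
# undefined names (services, disco) are not a problem here.
print(black.format_str(src, mode=black.FileMode()))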
@@ -18,10 +18,11 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 from sat.core.i18n import _
 from sat.core import exceptions
 from sat.core.log import getLogger
+
 log = getLogger(__name__)
 from twisted.words.protocols.jabber import jid
 from twisted.words.protocols.jabber.error import StanzaError
 from twisted.internet import defer
 from twisted.internet import reactor
@@ -33,25 +34,26 @@
 from base64 import b64encode
 from hashlib import sha1
 
 
 TIMEOUT = 15
-CAP_HASH_ERROR = 'ERROR'
+CAP_HASH_ERROR = "ERROR"
+
 
 class HashGenerationError(Exception):
     pass
 
 
 class ByteIdentity(object):
     """This class manage identity as bytes (needed for i;octet sort), it is used for the hash generation"""
 
     def __init__(self, identity, lang=None):
         assert isinstance(identity, disco.DiscoIdentity)
-        self.category = identity.category.encode('utf-8')
-        self.idType = identity.type.encode('utf-8')
-        self.name = identity.name.encode('utf-8') if identity.name else ''
-        self.lang = lang.encode('utf-8') if lang is not None else ''
+        self.category = identity.category.encode("utf-8")
+        self.idType = identity.type.encode("utf-8")
+        self.name = identity.name.encode("utf-8") if identity.name else ""
+        self.lang = lang.encode("utf-8") if lang is not None else ""
 
     def __str__(self):
         return "%s/%s/%s/%s" % (self.category, self.idType, self.lang, self.name)
 
 
@@ -61,12 +63,12 @@
     persistent storage is update when a new hash is added
     """
 
     def __init__(self, persistent):
         self.hashes = {
-            CAP_HASH_ERROR: disco.DiscoInfo(),  # used when we can't get disco infos
+            CAP_HASH_ERROR: disco.DiscoInfo()  # used when we can't get disco infos
         }
         self.persistent = persistent
 
     def __getitem__(self, key):
         return self.hashes[key]
 
@@ -84,16 +86,20 @@
         def fillHashes(hashes):
             for hash_, xml in hashes.iteritems():
                 element = xml_tools.ElementParser()(xml)
                 disco_info = disco.DiscoInfo.fromElement(element)
                 if not disco_info.features and not disco_info.identities:
-                    log.warning(_(u"no feature/identity found in disco element (hash: {cap_hash}), ignoring: {xml}").format(
-                        cap_hash=hash_, xml=xml))
+                    log.warning(
+                        _(
+                            u"no feature/identity found in disco element (hash: {cap_hash}), ignoring: {xml}"
+                        ).format(cap_hash=hash_, xml=xml)
+                    )
                 else:
                     self.hashes[hash_] = disco_info
 
             log.info(u"Disco hashes loaded")
+
         d = self.persistent.load()
         d.addCallback(fillHashes)
         return d
 
 
@@ -108,11 +114,11 @@
         """Load persistent hashes"""
         self.hashes = HashManager(persistent.PersistentDict("disco"))
         return self.hashes.load()
 
     @defer.inlineCallbacks
-    def hasFeature(self, client, feature, jid_=None, node=u''):
+    def hasFeature(self, client, feature, jid_=None, node=u""):
         """Tell if an entity has the required feature
 
         @param feature: feature namespace
         @param jid_: jid of the target, or None for profile's server
         @param node(unicode): optional node to use for disco request
@@ -120,11 +126,11 @@
         """
         disco_infos = yield self.getInfos(client, jid_, node)
         defer.returnValue(feature in disco_infos.features)
 
     @defer.inlineCallbacks
-    def checkFeature(self, client, feature, jid_=None, node=u''):
+    def checkFeature(self, client, feature, jid_=None, node=u""):
         """Like hasFeature, but raise an exception is feature is not Found
 
         @param feature: feature namespace
         @param jid_: jid of the target, or None for profile's server
         @param node(unicode): optional node to use for disco request
@@ -134,11 +140,11 @@
         disco_infos = yield self.getInfos(client, jid_, node)
         if not feature in disco_infos.features:
             raise failure.Failure(exceptions.FeatureNotFound)
 
     @defer.inlineCallbacks
-    def checkFeatures(self, client, features, jid_=None, identity=None, node=u''):
+    def checkFeatures(self, client, features, jid_=None, identity=None, node=u""):
         """Like checkFeature, but check several features at once, and check also identity
 
         @param features(iterable[unicode]): features to check
         @param jid_(jid.JID): jid of the target, or None for profile's server
         @param node(unicode): optional node to use for disco request
@@ -151,11 +157,11 @@
             raise failure.Failure(exceptions.FeatureNotFound())
 
         if identity is not None and identity not in disco_infos.identities:
             raise failure.Failure(exceptions.FeatureNotFound())
 
-    def getInfos(self, client, jid_=None, node=u'', use_cache=True):
+    def getInfos(self, client, jid_=None, node=u"", use_cache=True):
         """get disco infos from jid_, filling capability hash if needed
 
         @param jid_: jid of the target, or None for profile's server
         @param node(unicode): optional node to use for disco request
         @param use_cache(bool): if True, use cached data if available
@@ -165,40 +171,52 @@
             jid_ = jid.JID(client.jid.host)
         try:
             if not use_cache:
                 # we ignore cache, so we pretend we haven't found it
                 raise KeyError
-            cap_hash = self.host.memory.getEntityData(jid_, [C.ENTITY_CAP_HASH], client.profile)[C.ENTITY_CAP_HASH]
+            cap_hash = self.host.memory.getEntityData(
+                jid_, [C.ENTITY_CAP_HASH], client.profile
+            )[C.ENTITY_CAP_HASH]
         except (KeyError, exceptions.UnknownEntityError):
             # capability hash is not available, we'll compute one
             def infosCb(disco_infos):
                 cap_hash = self.generateHash(disco_infos)
                 self.hashes[cap_hash] = disco_infos
-                self.host.memory.updateEntityData(jid_, C.ENTITY_CAP_HASH, cap_hash, profile_key=client.profile)
+                self.host.memory.updateEntityData(
+                    jid_, C.ENTITY_CAP_HASH, cap_hash, profile_key=client.profile
+                )
                 return disco_infos
+
             def infosEb(fail):
                 if fail.check(defer.CancelledError):
                     reason = u"request time-out"
                 else:
                     try:
                         reason = unicode(fail.value)
                     except AttributeError:
                         reason = unicode(fail)
-                log.warning(u"Error while requesting disco infos from {jid}: {reason}".format(jid=jid_.full(), reason=reason))
-                self.host.memory.updateEntityData(jid_, C.ENTITY_CAP_HASH, CAP_HASH_ERROR, profile_key=client.profile)
+                log.warning(
+                    u"Error while requesting disco infos from {jid}: {reason}".format(
+                        jid=jid_.full(), reason=reason
+                    )
+                )
+                self.host.memory.updateEntityData(
+                    jid_, C.ENTITY_CAP_HASH, CAP_HASH_ERROR, profile_key=client.profile
+                )
                 disco_infos = self.hashes[CAP_HASH_ERROR]
                 return disco_infos
+
             d = client.disco.requestInfo(jid_, nodeIdentifier=node)
             d.addCallback(infosCb)
             d.addErrback(infosEb)
             return d
         else:
             disco_infos = self.hashes[cap_hash]
             return defer.succeed(disco_infos)
@@ -209,34 +227,42 @@
             jid_ = server_jid
 
         if jid_ == server_jid and not node:
             # we cache items only for our own server and if node is not set
             try:
-                items = self.host.memory.getEntityData(jid_, ["DISCO_ITEMS"], client.profile)["DISCO_ITEMS"]
+                items = self.host.memory.getEntityData(
+                    jid_, ["DISCO_ITEMS"], client.profile
+                )["DISCO_ITEMS"]
                 log.debug(u"[%s] disco items are in cache" % jid_.full())
                 if not use_cache:
                     # we ignore cache, so we pretend we haven't found it
                     raise KeyError
             except (KeyError, exceptions.UnknownEntityError):
                 log.debug(u"Caching [%s] disco items" % jid_.full())
                 items = yield client.disco.requestItems(jid_, nodeIdentifier=node)
-                self.host.memory.updateEntityData(jid_, "DISCO_ITEMS", items, profile_key=client.profile)
+                self.host.memory.updateEntityData(
+                    jid_, "DISCO_ITEMS", items, profile_key=client.profile
+                )
         else:
             try:
                 items = yield client.disco.requestItems(jid_, nodeIdentifier=node)
             except StanzaError as e:
-                log.warning(u"Error while requesting items for {jid}: {reason}"
-                            .format(jid=jid_.full(), reason=e.condition))
+                log.warning(
+                    u"Error while requesting items for {jid}: {reason}".format(
+                        jid=jid_.full(), reason=e.condition
+                    )
+                )
                 items = disco.DiscoItems()
 
         defer.returnValue(items)
-
 
     def _infosEb(self, failure_, entity_jid):
         failure_.trap(StanzaError)
-        log.warning(_(u"Error while requesting [%(jid)s]: %(error)s") % {'jid': entity_jid.full(),
-                                                                         'error': failure_.getErrorMessage()})
+        log.warning(
+            _(u"Error while requesting [%(jid)s]: %(error)s")
+            % {"jid": entity_jid.full(), "error": failure_.getErrorMessage()}
+        )
 
     def findServiceEntity(self, client, category, type_, jid_=None):
         """Helper method to find first available entity from findServiceEntities
 
         args are the same as for [findServiceEntities]
@@ -263,18 +289,22 @@
 
         def gotItems(items):
             defers_list = []
             for item in items:
                 info_d = self.getInfos(client, item.entity)
-                info_d.addCallbacks(infosCb, self._infosEb, [item.entity], None, [item.entity])
+                info_d.addCallbacks(
+                    infosCb, self._infosEb, [item.entity], None, [item.entity]
+                )
                 defers_list.append(info_d)
             return defer.DeferredList(defers_list)
 
         d = self.getItems(client, jid_)
         d.addCallback(gotItems)
         d.addCallback(lambda dummy: found_entities)
-        reactor.callLater(TIMEOUT, d.cancel)  # FIXME: one bad service make a general timeout
+        reactor.callLater(
+            TIMEOUT, d.cancel
+        )  # FIXME: one bad service make a general timeout
         return d
 
     def findFeaturesSet(self, client, features, identity=None, jid_=None):
         """Return entities (including jid_ and its items) offering features
 
@@ -289,11 +319,11 @@
         features = set(features)
         found_entities = set()
 
         def infosCb(infos, entity):
             if entity is None:
-                log.warning(_(u'received an item without jid'))
+                log.warning(_(u"received an item without jid"))
                 return
             if identity is not None and identity not in infos.identities:
                 return
             if features.issubset(infos.features):
                 found_entities.add(entity)
@@ -307,40 +337,52 @@
             return defer.DeferredList(defer_list)
 
         d = self.getItems(client, jid_)
         d.addCallback(gotItems)
         d.addCallback(lambda dummy: found_entities)
-        reactor.callLater(TIMEOUT, d.cancel)  # FIXME: one bad service make a general timeout
+        reactor.callLater(
+            TIMEOUT, d.cancel
+        )  # FIXME: one bad service make a general timeout
         return d
 
     def generateHash(self, services):
         """ Generate a unique hash for given service
 
         hash algorithm is the one described in XEP-0115
         @param services: iterable of disco.DiscoIdentity/disco.DiscoFeature, as returned by discoHandler.info
 
         """
         s = []
-        byte_identities = [ByteIdentity(service) for service in services if isinstance(service, disco.DiscoIdentity)]  # FIXME: lang must be managed here
+        byte_identities = [
+            ByteIdentity(service)
+            for service in services
+            if isinstance(service, disco.DiscoIdentity)
+        ]  # FIXME: lang must be managed here
         byte_identities.sort(key=lambda i: i.lang)
         byte_identities.sort(key=lambda i: i.idType)
         byte_identities.sort(key=lambda i: i.category)
         for identity in byte_identities:
             s.append(str(identity))
-            s.append('<')
-        byte_features = [service.encode('utf-8') for service in services if isinstance(service, disco.DiscoFeature)]
+            s.append("<")
+        byte_features = [
+            service.encode("utf-8")
+            for service in services
+            if isinstance(service, disco.DiscoFeature)
+        ]
         byte_features.sort()  # XXX: the default sort has the same behaviour as the requested RFC 4790 i;octet sort
         for feature in byte_features:
             s.append(feature)
-            s.append('<')
-        #TODO: manage XEP-0128 data form here
-        cap_hash = b64encode(sha1(''.join(s)).digest())
-        log.debug(_(u'Capability hash generated: [%s]') % cap_hash)
+            s.append("<")
+        # TODO: manage XEP-0128 data form here
+        cap_hash = b64encode(sha1("".join(s)).digest())
+        log.debug(_(u"Capability hash generated: [%s]") % cap_hash)
         return cap_hash
 
     @defer.inlineCallbacks
-    def _discoInfos(self, entity_jid_s, node=u'', use_cache=True, profile_key=C.PROF_KEY_NONE):
+    def _discoInfos(
+        self, entity_jid_s, node=u"", use_cache=True, profile_key=C.PROF_KEY_NONE
+    ):
         """ Discovery method for the bridge
         @param entity_jid_s: entity we want to discover
         @param use_cache(bool): if True, use cached data if available
         @param node(unicode): optional node to use
 
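The generateHash method reformatted in the hunk above computes the XEP-0115 entity capability hash: identities sorted by category, type and language, features sorted with i;octet semantics, each element terminated by "<", and the result SHA-1 hashed and base64 encoded. For reference, a minimal standalone sketch of the same computation (not part of the changeset; it takes plain strings instead of wokkel disco objects and, like the method above, leaves XEP-0128 data forms out):

from base64 import b64encode
from hashlib import sha1


def caps_hash(identities, features):
    """Compute a XEP-0115 'ver' string.

    identities: iterable of (category, type, lang, name) tuples
    features: iterable of feature namespace strings
    """
    parts = []
    # sorting the tuples orders by category, then type, then lang, which is
    # what the three stable sorts in generateHash above achieve
    for category, type_, lang, name in sorted(identities):
        parts.append(u"%s/%s/%s/%s<" % (category, type_, lang, name))
    for feature in sorted(features):
        parts.append(feature + u"<")
    # XEP-0128 extension forms would be serialized here as well (see the TODO above)
    return b64encode(sha1(u"".join(parts).encode("utf-8")).digest()).decode("ascii")


# the simple generation example from XEP-0115 should yield
# QgayPKawpkPSDYmwT/WM94uAlu0=
print(caps_hash(
    [(u"client", u"pc", u"", u"Exodus 0.9.1")],
    [u"http://jabber.org/protocol/caps",
     u"http://jabber.org/protocol/disco#info",
     u"http://jabber.org/protocol/disco#items",
     u"http://jabber.org/protocol/muc"],
))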
@@ -351,24 +393,31 @@
         disco_infos = yield self.getInfos(client, entity, node, use_cache)
         extensions = {}
         for form_type, form in disco_infos.extensions.items():
             fields = []
             for field in form.fieldList:
-                data = {'type': field.fieldType}
-                for attr in ('var', 'label', 'desc'):
+                data = {"type": field.fieldType}
+                for attr in ("var", "label", "desc"):
                     value = getattr(field, attr)
                     if value is not None:
                         data[attr] = value
 
                 values = [field.value] if field.value is not None else field.values
                 fields.append((data, values))
 
             extensions[form_type or ""] = fields
 
-        defer.returnValue((disco_infos.features,
-                           [(cat, type_, name or '') for (cat, type_), name in disco_infos.identities.items()],
-                           extensions))
+        defer.returnValue(
+            (
+                disco_infos.features,
+                [
+                    (cat, type_, name or "")
+                    for (cat, type_), name in disco_infos.identities.items()
+                ],
+                extensions,
+            )
+        )
 
     def items2tuples(self, disco_items):
         """convert disco items to tuple of strings
 
         @param disco_items(iterable[disco.DiscoItem]): items
@@ -376,14 +425,16 @@
         """
         for item in disco_items:
             if not item.entity:
                 log.warning(_(u"invalid item (no jid)"))
                 continue
-            yield (item.entity.full(), item.nodeIdentifier or '', item.name or '')
+            yield (item.entity.full(), item.nodeIdentifier or "", item.name or "")
 
     @defer.inlineCallbacks
-    def _discoItems(self, entity_jid_s, node=u'', use_cache=True, profile_key=C.PROF_KEY_NONE):
+    def _discoItems(
+        self, entity_jid_s, node=u"", use_cache=True, profile_key=C.PROF_KEY_NONE
+    ):
         """ Discovery method for the bridge
 
         @param entity_jid_s: entity we want to discover
         @param node(unicode): optional node to use
         @param use_cache(bool): if True, use cached data if available