Mercurial > libervia-backend
view sat/tools/common/async_utils.py @ 4002:5245b675f7ad
plugin XEP-0313: don't wait for MAM to be retrieved in connection workflow:
MAM retrieval can be long, and can be done after connection; messages just need to be
sorted when being inserted (i.e. frontends must do an insort).
To avoid blocking the connection for too long, which would result in bad UX and a timeout
risk, one2one MAM messages are now retrieved in the background.
author | Goffi <goffi@goffi.org> |
---|---|
date | Fri, 10 Mar 2023 17:22:45 +0100 |
parents | ebe45ea2df3b |
children |
line wrap: on
line source
#!/usr/bin/env python3

# Libervia: an XMPP client
# Copyright (C) 2009-2021 Jérôme Poisson (goffi@goffi.org)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""tools to launch process in a async way (using Twisted)"""

from collections import OrderedDict
from functools import wraps
from typing import Any, Awaitable, Callable, Optional

from sat.core.log import getLogger

log = getLogger(__name__)


def async_lru(maxsize: Optional[int] = 50) -> Callable:
    """Decorator to cache async function results using LRU algorithm

    Positional arguments are used as the cache key, so they must all be
    hashable; keyword arguments are not supported by the wrapper.

    NOTE(review): concurrent calls with the same arguments are not
    coalesced — each in-flight call awaits ``func`` independently until
    the first result lands in the cache.

    @param maxsize: maximum number of items to keep in cache.
        None to have no limit.
    """

    def decorator(func: Callable) -> Callable:
        cache: "OrderedDict" = OrderedDict()

        # functools.wraps preserves the wrapped coroutine's metadata
        # (__name__, __doc__, __qualname__, …), which the original
        # implementation lost.
        @wraps(func)
        async def wrapper(*args) -> Any:
            # An ``async def``'s return annotation denotes the *awaited*
            # value, hence ``Any`` rather than the misleading ``Awaitable``.
            if args in cache:
                log.debug(f"using result in cache for {args}")
                # mark this entry as most recently used
                cache.move_to_end(args)
                result = cache[args]
                return result
            log.debug(f"caching result for {args}")
            result = await func(*args)
            cache[args] = result
            if maxsize is not None and len(cache) > maxsize:
                # evict the least recently used entry (FIFO head)
                value = cache.popitem(False)
                log.debug(f"Removing LRU value: {value}")
            return result

        return wrapper

    return decorator