view sat/tools/common/async_utils.py @ 3833:381340b9a9ee

component AP gateway: convert XMPP mentions to AP: When a XEP-0372 mention is received, the linked pubsub item is looked after in cache, and if found, it is send to mentioned entity with `mention` tag added. However, this doesn't work in some cases (see incoming doc for details). To work around that, `@user@server.tld` type mention are also scanned in body, and mentions are added when found (this can be disabled with `auto_mentions` setting). Mention are only scanned in "public" messages, i.e. for pubsub items, and not direct messages. rel 369
author Goffi <goffi@goffi.org>
date Sun, 10 Jul 2022 16:15:06 +0200
parents 9b45f0f168cf
children ebe45ea2df3b
line wrap: on
line source

#!/usr/bin/env python3


# Libervia: an XMPP client
# Copyright (C) 2009-2021 Jérôme Poisson (goffi@goffi.org)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""tools to launch process in a async way (using Twisted)"""

from collections import OrderedDict
from typing import Optional, Callable, Awaitable
from sat.core.log import getLogger


log = getLogger(__name__)


def async_lru(maxsize: Optional[int] = None) -> Callable:
    """Decorator to cache async function results using LRU algorithm"""
    def decorator(func: Callable) -> Callable:
        cache = OrderedDict()
        async def wrapper(*args) -> Awaitable:
            if args in cache:
                log.debug(f"using result in cache for {args}")
                cache.move_to_end(args)
                result = cache[args]
                return result
            log.debug(f"caching result for {args}")
            result = await func(*args)
            cache[args] = result
            if maxsize is not None and len(cache) > maxsize:
                value = cache.popitem(False)
                log.debug(f"Removing LRU value: {value}")
            return result
        return wrapper
    return decorator