diff src/plugins/plugin_xep_0277.py @ 1459:4c4f88d7b156

plugins xep-0060, xep-0163, xep-0277, groupblog: blogging improvements (huge patch, sorry): /!\ not everything is working yet, and especially groupblogs are broken /!\ - renamed bridge api to use prefixed methods (e.g. psSubscribeToMany instead of subscribeToMany in PubSub) - (xep-0060): try to find a default PubSub service, and put it in client.pubsub_service - (xep-0060): extra dictionary can be used in bridge method for RSM and other options - (xep-0060): XEP_0060.addManagedNode and XEP_0060.removeManagedNode allow to easily catch notifications for a specific node - (xep-0060): retractItem manages "notify" attribute - (xep-0060): new signal psEvent will be used to transmit notifications to frontends - (xep-0060, constants): added a bunch of useful constants - (xep-0163): removed personalEvent in favor of psEvent - (xep-0163): addPEPEvent now filters non-PEP events for in_callback - (xep-0277): use of new XEP-0060 plugin's addManagedNode - (xep-0277): fixed author handling for incoming blogs: author is the human-readable name, author_jid is the jid, and author_jid_verified is set to True if the jid is checked - (xep-0277): reworked data2entry with Twisted instead of feed, item_id can now be specified, <content/> is changed to <title/> if there is only content - (xep-0277): comments are now managed here (core removed from groupblog) - (xep-0277): (comments) node is created if needed, default pubsub service is used if available, else PEP - (xep-0277): retract is managed
author Goffi <goffi@goffi.org>
date Sun, 16 Aug 2015 00:39:44 +0200
parents 4e2fab4de195
children 83f71763e1a7
line wrap: on
line diff
--- a/src/plugins/plugin_xep_0277.py	Sun Aug 16 00:06:59 2015 +0200
+++ b/src/plugins/plugin_xep_0277.py	Sun Aug 16 00:39:44 2015 +0200
@@ -21,25 +21,28 @@
 from sat.core.constants import Const as C
 from sat.core.log import getLogger
 log = getLogger(__name__)
-from twisted.words.protocols.jabber import jid
+from twisted.words.protocols.jabber import jid, error
+from twisted.words.xish import domish
 from twisted.internet import defer
 from twisted.python import failure
 from sat.core import exceptions
-from sat.tools.xml_tools import ElementParser
+from sat.tools import xml_tools
 from sat.tools import sat_defer
 
+# XXX: tmp.pubsub is actually used instead of wokkel version
 from wokkel import pubsub
-from wokkel import rsm
-from feed import atom, date
+from feed.date import rfc3339
 import uuid
-from time import time
+import time
 import urlparse
-from cgi import escape
+import urllib
 
 NS_MICROBLOG = 'urn:xmpp:microblog:0'
 NS_ATOM = 'http://www.w3.org/2005/Atom'
 NS_XHTML = 'http://www.w3.org/1999/xhtml'
 NS_PUBSUB_EVENT = "{}{}".format(pubsub.NS_PUBSUB, "#event")
+NS_COMMENT_PREFIX = '{}:comments/'.format(NS_MICROBLOG)
+
 
 PLUGIN_INFO = {
     "name": "Microblogging over XMPP Plugin",
@@ -65,52 +68,56 @@
         self.host = host
         self._p = self.host.plugins["XEP-0060"] # this facilitate the access to pubsub plugin
         self.rt_sessions = sat_defer.RTDeferredSessions()
-        self.host.plugins["XEP-0163"].addPEPEvent("MICROBLOG", NS_MICROBLOG, self.microblogCB, self.sendMicroblog, notify=False)
-        host.bridge.addMethod("getLastMicroblogs", ".plugin",
-                              in_sign='sis', out_sign='(aa{ss}a{ss})',
-                              method=self._getLastMicroblogs,
-                              async=True,
-                              doc={'summary': 'retrieve items',
-                                   'param_0': 'jid: publisher of wanted microblog',
-                                   'param_1': 'max_items: see XEP-0060 #6.5.7',
-                                   'param_2': '%(doc_profile)s',
-                                   'return': 'list of microblog data (dict)'})
-        host.bridge.addMethod("setMicroblogAccess", ".plugin", in_sign='ss', out_sign='',
-                              method=self.setMicroblogAccess,
+        self.host.plugins["XEP-0060"].addManagedNode(None, items_cb=self._itemsReceived)
+
+        host.bridge.addMethod("mbSend", ".plugin",
+                              in_sign='ssa{ss}s', out_sign='',
+                              method=self._mbSend,
+                              async=True)
+        host.bridge.addMethod("mbRetract", ".plugin",
+                              in_sign='ssss', out_sign='',
+                              method=self._mbRetract,
+                              async=True)
+        host.bridge.addMethod("mbGetLast", ".plugin",
+                              in_sign='ssia{ss}s', out_sign='(aa{ss}a{ss})',
+                              method=self._mbGetLast,
                               async=True)
-        host.bridge.addMethod("mBSubscribeToMany", ".plugin", in_sign='sass', out_sign='s',
-                              method=self._mBSubscribeToMany)
-        host.bridge.addMethod("mBGetFromManyRTResult", ".plugin", in_sign='ss', out_sign='(ua(sssaa{ss}a{ss}))',
-                              method=self._mBGetFromManyRTResult, async=True)
-        host.bridge.addMethod("mBGetFromMany", ".plugin", in_sign='sasia{ss}s', out_sign='s',
-                              method=self._mBGetFromMany)
-        host.bridge.addMethod("mBGetFromManyWithCommentsRTResult", ".plugin", in_sign='ss', out_sign='(ua(sssa(a{ss}a(sssaa{ss}a{ss}))a{ss}))',
-                              method=self._mBGetFromManyWithCommentsRTResult, async=True)
-        host.bridge.addMethod("mBGetFromManyWithComments", ".plugin", in_sign='sasiia{ss}a{ss}s', out_sign='s', method=self._mBGetFromManyWithComments)
+        host.bridge.addMethod("mbSetAccess", ".plugin", in_sign='ss', out_sign='',
+                              method=self.mbSetAccess,
+                              async=True)
+        host.bridge.addMethod("mbSetAccess", ".plugin", in_sign='ss', out_sign='',
+                              method=self.mbSetAccess,
+                              async=True)
+        host.bridge.addMethod("mbSubscribeToMany", ".plugin", in_sign='sass', out_sign='s',
+                              method=self._mbSubscribeToMany)
+        host.bridge.addMethod("mbGetFromManyRTResult", ".plugin", in_sign='ss', out_sign='(ua(sssaa{ss}a{ss}))',
+                              method=self._mbGetFromManyRTResult, async=True)
+        host.bridge.addMethod("mbGetFromMany", ".plugin", in_sign='sasia{ss}s', out_sign='s',
+                              method=self._mbGetFromMany)
+        host.bridge.addMethod("mbGetFromManyWithCommentsRTResult", ".plugin", in_sign='ss', out_sign='(ua(sssa(a{ss}a(sssaa{ss}a{ss}))a{ss}))',
+                              method=self._mbGetFromManyWithCommentsRTResult, async=True)
+        host.bridge.addMethod("mbGetFromManyWithComments", ".plugin", in_sign='sasiia{ss}a{ss}s', out_sign='s', method=self._mbGetFromManyWithComments)
 
     ## plugin management methods ##
 
-    def microblogCB(self, itemsEvent, profile):
-        """Callback to "MICROBLOG" PEP event."""
-        def manageItem(microblog_data):
-            self.host.bridge.personalEvent(itemsEvent.sender.full(), "MICROBLOG", microblog_data, profile)
+    def _itemsReceived(self, itemsEvent, profile):
+        """Callback which manage items notifications (publish + retract)"""
+        if not itemsEvent.nodeIdentifier.startswith(NS_MICROBLOG):
+            return
+        def manageItem(data, event):
+            self.host.bridge.psEvent(C.PS_MICROBLOG, itemsEvent.sender.full(), itemsEvent.nodeIdentifier, event, data, profile)
 
         for item in itemsEvent.items:
-            self.item2mbdata(item).addCallbacks(manageItem, lambda failure: None)
+            if item.name == C.PS_ITEM:
+                self.item2mbdata(item).addCallbacks(manageItem, lambda failure: None, (C.PS_PUBLISH,))
+            elif item.name == C.PS_RETRACT:
+                manageItem({'id': item['id']}, C.PS_RETRACT)
+            else:
+                raise exceptions.InternalError("Invalid event value")
+
 
     ## data/item transformation ##
 
-    def _removeXHTMLMarkups(self, xhtml):
-        """Remove XHTML markups from the given string.
-
-        @param xhtml: the XHTML string to be cleaned
-        @return: a Deferred instance for the cleaned string
-        """
-        return self.host.plugins["TEXT-SYNTAXES"].convert(xhtml,
-                                                          self.host.plugins["TEXT-SYNTAXES"].SYNTAX_XHTML,
-                                                          self.host.plugins["TEXT-SYNTAXES"].SYNTAX_TEXT,
-                                                          False)
-
     @defer.inlineCallbacks
     def item2mbdata(self, item_elt):
         """Convert an XML Item to microblog data used in bridge API
@@ -151,7 +158,7 @@
                 if data_elt.uri != NS_XHTML:
                     raise failure.Failure(exceptions.DataError(_('Content of type XHTML must declare its namespace!')))
                 key = check_conflict(u'{}_xhtml'.format(elem.name))
-                data = unicode(data_elt)
+                data = data_elt.toXml()
                 microblog_data[key] = yield self.host.plugins["TEXT-SYNTAXES"].clean_xhtml(data)
             else:
                 key = check_conflict(elem.name)
@@ -196,7 +203,7 @@
         # we check that text content is present
         for key in ('title', 'content'):
             if key not in microblog_data and ('{}_xhtml'.format(key)) in microblog_data:
-                log.warning(u"item {id_} provide a {key}_xhtml data but not a text one".format(id_, key))
+                log.warning(u"item {id_} provide a {key}_xhtml data but not a text one".format(id_=id_, key=key))
                 # ... and do the conversion if it's not
                 microblog_data[key] = yield self.host.plugins["TEXT-SYNTAXES"].\
                                             convert(microblog_data['{}_xhtml'.format(key)],
@@ -218,13 +225,13 @@
         except StopIteration:
             msg = u'No atom updated element found in the pubsub item {}'.format(id_)
             raise failure.Failure(exceptions.DataError(msg))
-        microblog_data['updated'] = unicode(date.rfc3339.tf_from_timestamp(unicode(updated_elt)))
+        microblog_data['updated'] = unicode(rfc3339.tf_from_timestamp(unicode(updated_elt)))
         try:
             published_elt = entry_elt.elements(NS_ATOM, 'published').next()
         except StopIteration:
             microblog_data['published'] = microblog_data['updated']
         else:
-            microblog_data['published'] = unicode(date.rfc3339.tf_from_timestamp(unicode(published_elt)))
+            microblog_data['published'] = unicode(rfc3339.tf_from_timestamp(unicode(published_elt)))
 
         # links
         for link_elt in entry_elt.elements(NS_ATOM, 'link'):
@@ -246,6 +253,7 @@
                 log.warning(u"Unmanaged link element: rel={rel} title={title} href={href}".format(rel=rel, title=title, href=href))
 
         # author
+        publisher = item_elt.getAttribute("publisher")
         try:
             author_elt = entry_elt.elements(NS_ATOM, 'author').next()
         except StopIteration:
@@ -263,16 +271,24 @@
                 uri_elt = author_elt.elements(NS_ATOM, 'uri').next()
             except StopIteration:
                 log.debug("No uri element found in author element of item {}".format(id_))
+                if publisher:
+                    microblog_data['author_jid'] =  publisher
             else:
                 uri = unicode(uri_elt)
                 if uri.startswith("xmpp:"):
                     uri = uri[5:]
-                    microblog_data['author_uri'] = uri
-                if item_elt.getAttribute("publisher") == uri:
-                    microblog_data['author_uri_verified'] = C.BOOL_TRUE
+                    microblog_data['author_jid'] = uri
+                else:
+                    microblog_data['author_jid'] = item_elt.getAttribute("publisher") or ""
+
+                if not publisher:
+                    log.debug("No publisher attribute, we can't verify author jid")
+                    microblog_data['author_jid_verified'] = C.BOOL_FALSE
+                elif publisher == uri:
+                    microblog_data['author_jid_verified'] = C.BOOL_TRUE
                 else:
                     log.warning("item atom:uri differ from publisher attribute, spoofing attempt ? atom:uri = {} publisher = {}".format(uri, item_elt.getAttribute("publisher")))
-                    microblog_data['author_uri_verified'] = C.BOOL_FALSE
+                    microblog_data['author_jid_verified'] = C.BOOL_FALSE
             # email
             try:
                 email_elt = author_elt.elements(NS_ATOM, 'email').next()
@@ -284,105 +300,216 @@
         defer.returnValue(microblog_data)
 
     @defer.inlineCallbacks
-    def data2entry(self, data, profile):
+    def data2entry(self, data, item_id=None, profile_key=C.PROF_KEY_NONE):
         """Convert a data dict to en entry usable to create an item
 
         @param data: data dict as given by bridge method.
+        @param item_id(unicode, None): id of the item to use
+            if None the id will be generated randomly
         @return: deferred which fire domish.Element
         """
-        #TODO: rewrite this directly with twisted (i.e. without atom / reparsing)
-        _uuid = unicode(uuid.uuid1())
-        _entry = atom.Entry()
-        _entry.title = ''  # reset the default value which is not empty
+        if item_id is None:
+            item_id = unicode(uuid.uuid4())
+        client = self.host.getClient(profile_key)
+        entry_elt = domish.Element((NS_ATOM, 'entry'))
 
-        elems = {'title': atom.Title, 'content': atom.Content}
+        ## content and title ##
         synt = self.host.plugins["TEXT-SYNTAXES"]
 
-        # loop on ('title', 'title_rich', 'title_xhtml', 'content', 'content_rich', 'content_xhtml')
-        for key in elems.keys():
-            for type_ in ['', 'rich', 'xhtml']:
-                attr = "%s_%s" % (key, type_) if type_ else key
+        for elem_name in ('title', 'content'):
+            for type_ in ['', '_rich', '_xhtml']:
+                attr = "{}{}".format(elem_name, type_)
                 if attr in data:
+                    elem = entry_elt.addElement(elem_name)
                     if type_:
-                        if type_ == 'rich':  # convert input from current syntax to XHTML
-                            converted = yield synt.convert(data[attr], synt.getCurrentSyntax(profile), "XHTML")
+                        if type_ == '_rich':  # convert input from current syntax to XHTML
+                            converted = yield synt.convert(data[attr], synt.getCurrentSyntax(profile_key), "XHTML")
+                            if '{}_xhtml'.format(elem_name) in data:
+                                raise failure.Failure(exceptions.DataError(_("Can't have xhtml and rich content at the same time")))
                         else:  # clean the XHTML input
                             converted = yield synt.clean_xhtml(data[attr])
-                        elem = elems[key]((u'<div xmlns="%s">%s</div>' % (NS_XHTML, converted)).encode('utf-8'))
-                        elem.attrs['type'] = 'xhtml'
-                        if hasattr(_entry, '%s_xhtml' % key):
-                            raise failure.Failure(exceptions.DataError(_("Can't have xhtml and rich content at the same time")))
-                        setattr(_entry, '%s_xhtml' % key, elem)
+
+                        xml_content = u'<div xmlns="{ns}">{converted}</div>'.format(
+                                        ns=NS_XHTML,
+                                        converted=converted)
+                        elem.addChild(xml_tools.ElementParser()(xml_content))
+                        elem['type'] = 'xhtml'
+                        if elem_name not in data:
+                            # there is raw text content, which is mandatory
+                            # so we create one from xhtml content
+                            elem_txt = entry_elt.addElement(elem_name)
+                            text_content = yield self.host.plugins["TEXT-SYNTAXES"].convert(xml_content,
+                                self.host.plugins["TEXT-SYNTAXES"].SYNTAX_XHTML,
+                                self.host.plugins["TEXT-SYNTAXES"].SYNTAX_TEXT,
+                                False)
+                            elem_txt.addContent(text_content)
+                            elem_txt['type'] = 'text'
+
                     else:  # raw text only needs to be escaped to get HTML-safe sequence
-                        elem = elems[key](escape(data[attr]).encode('utf-8'))
-                        elem.attrs['type'] = 'text'
-                        setattr(_entry, key, elem)
-            if not getattr(_entry, key).text:
-                if hasattr(_entry, '%s_xhtml' % key):
-                    text = yield self._removeXHTMLMarkups(getattr(_entry, '%s_xhtml' % key).text)
-                    setattr(_entry, key, text)
-        if not _entry.title.text:  # eventually move the data from content to title
-            _entry.title = _entry.content.text
-            _entry.title.attrs['type'] = _entry.content.attrs['type']
-            _entry.content.text = ''
-            _entry.content.attrs['type'] = ''
-            if hasattr(_entry, 'content_xhtml'):
-                _entry.title_xhtml = atom.Title(_entry.content_xhtml.text)
-                _entry.title_xhtml.attrs['type'] = _entry.content_xhtml.attrs['type']
-                _entry.content_xhtml.text = ''
-                _entry.content_xhtml.attrs['type'] = ''
+                        elem.addContent(data[attr])
+                        elem['type'] = 'text'
+
+        try:
+            entry_elt.elements(NS_ATOM, 'title').next()
+        except StopIteration:
+            # we have no title element which is mandatory
+            # so we transform content element to title
+            elems = list(entry_elt.elements(NS_ATOM, 'content'))
+            if not elems:
+                raise exceptions.DataError("There must be at least one content or title element")
+            for elem in elems:
+                elem.name = 'title'
+
+        ## author ##
+        author_elt = entry_elt.addElement('author')
+        try:
+            author_name = data['author']
+        except KeyError:
+            # FIXME: must use better name
+            author_name = client.jid.user
+        author_elt.addElement('name', content=author_name)
 
-        _entry.author = atom.Author()
-        _entry.author.name = data.get('author', self.host.getJidNStream(profile)[0].userhost()).encode('utf-8')
-        _entry.updated = float(data.get('updated', time()))
-        _entry.published = float(data.get('published', time()))
-        entry_id = data.get('id', unicode(_uuid))
-        _entry.id = entry_id.encode('utf-8')
+        try:
+            author_jid_s = data['author_jid']
+        except KeyError:
+            author_jid_s = client.jid.userhost()
+        author_elt.addElement('uri', content="xmpp:{}".format(author_jid_s))
+
+        ## published/updated time ##
+        current_time = time.time()
+        entry_elt.addElement('updated',
+            content=rfc3339.timestamp_from_tf(float(data.get('updated', current_time))))
+        entry_elt.addElement('published',
+            content=rfc3339.timestamp_from_tf(float(data.get('published', current_time))))
+
+        ## id ##
+        entry_id = data.get('id', item_id) # FIXME: use a proper id (see XEP-0277 §7.1)
+        entry_elt.addElement('id', content=entry_id) #
+
+        ## comments ##
         if 'comments' in data:
-            link = atom.Link()
-            link.attrs['href'] = data['comments']
-            link.attrs['rel'] = 'replies'
-            link.attrs['title'] = 'comments'
-            _entry.links.append(link)
-        _entry_elt = ElementParser()(str(_entry).decode('utf-8'))
-        item = pubsub.Item(id=entry_id, payload=_entry_elt)
-        defer.returnValue(item)
+            link_elt = entry_elt.addElement('link')
+            link_elt['href'] = data['comments']
+            link_elt['rel'] = 'replies'
+            link_elt['title'] = 'comments'
+
+        ## final item building ##
+        item_elt = pubsub.Item(id=item_id, payload=entry_elt)
+        defer.returnValue(item_elt)
 
     ## publish ##
 
     @defer.inlineCallbacks
-    def sendMicroblog(self, data, profile):
+    def _manageComments(self, access, mb_data, service, node, item_id, profile):
+        """Check comments keys in mb_data and create comments node if necessary
+
+        @param access(unicode): access model
+        @param mb_data(dict): microblog mb_data
+        @param service(jid.JID): Pubsub service of the parent item
+        @param node(unicode): node of the parent item
+        @param item_id(unicode): id of the parent item
+        """
+        allow_comments = C.bool(mb_data.pop("allow_comments", "false"))
+        if not allow_comments:
+            return
+
+        client = self.host.getClient(profile)
+
+        options = {self._p.OPT_ACCESS_MODEL: access,
+                   self._p.OPT_PERSIST_ITEMS: 1,
+                   self._p.OPT_MAX_ITEMS: -1,
+                   self._p.OPT_DELIVER_PAYLOADS: 1,
+                   self._p.OPT_SEND_ITEM_SUBSCRIBE: 1,
+                   self._p.OPT_PUBLISH_MODEL: "subscribers",  # TODO: should be open if *both* node and item access_model are open (public node and item)
+                   }
+
+        comments_node_base = u"{}{}".format(NS_COMMENT_PREFIX, item_id)
+        comments_node = comments_node_base
+
+        suffix = None
+        comments_service = client.pubsub_service if client.pubsub_service is not None else service
+        max_tries = 3
+
+        for i in xrange(max_tries+1):
+            try:
+                yield self._p.createNode(comments_service, comments_node, options, profile_key=profile)
+                break
+            except error.StanzaError as e:
+                if e.condition == 'conflict' and i<max_tries:
+                    log.warning(u"node {} already exists on service {}".format(comments_node, comments_service))
+                    suffix = 0 if suffix is None else suffix + 1
+                    comments_node = u"{}_{}".format(comments_node_base, suffix)
+                else:
+                    raise e
+
+        if comments_service is None:
+            comments_service = client.jid.userhostJID()
+
+        mb_data['comments'] = "xmpp:%(service)s?%(query)s" % {
+            'service': comments_service.userhost(),
+            'query': urllib.urlencode([('node', comments_node.encode('utf-8'))])
+            }
+
+    def _mbSend(self, service, node, data, profile_key):
+        service = jid.JID(service) if service else None
+        node = node if node else NS_MICROBLOG
+        profile = self.host.memory.getProfileName(profile_key)
+        return self.send(service, node, data, profile)
+
+    @defer.inlineCallbacks
+    def send(self, service=None, node=NS_MICROBLOG, data=None, profile=None):
         """Send XEP-0277's microblog data
 
-        @param data: must include content
-        @param profile: profile which send the mood"""
-        if 'content' not in data:
-            log.error("Microblog data must contain at least 'content' key")
-            raise failure.Failure(exceptions.DataError('no "content" key found'))
-        content = data['content']
-        if not content:
-            log.error("Microblog data's content value must not be empty")
-            raise failure.Failure(exceptions.DataError('empty content'))
-        item = yield self.data2entry(data, profile)
-        ret = yield self._p.publish(None, NS_MICROBLOG, [item], profile_key=profile)
+        @param service(jid.JID, None): PubSub service where the microblog must be published
+            None to publish on profile's PEP
+        @param node(unicode): PubSub node to use (default to microblog NS)
+        @param data(dict): microblog data (must include at least a "content" or a "title" key).
+            see http://wiki.goffi.org/wiki/Bridge_API_-_Microblogging/en for details
+        @param profile: %(doc_profile)s
+        """
+        assert profile is not None
+
+        item_id = data.get('id') or unicode(uuid.uuid4())
+        try:
+            yield self._manageComments(self._p.ACCESS_OPEN, data, service, node, item_id, profile)
+        except error.StanzaError:
+            log.warning("Can't create comments node for item {}".format(item_id))
+        item = yield self.data2entry(data, item_id, profile)
+        ret = yield self._p.publish(service, node, [item], profile_key=profile)
         defer.returnValue(ret)
 
+
+    ## retract ##
+
+    def _mbRetract(self, service_jid_s, nodeIdentifier, itemIdentifier, profile_key):
+        """Call self._p._retractItem, but use default node if node is empty"""
+        return self._p._retractItem(service_jid_s, nodeIdentifier or NS_MICROBLOG, itemIdentifier, True, profile_key)
+
     ## get ##
 
-    def _getLastMicroblogs(self, pub_jid_s, max_items=10, profile_key=C.PROF_KEY_NONE):
-        return self.getLastMicroblogs(jid.JID(pub_jid_s), max_items, profile_key)
+    def _mbGetLast(self, service_jid_s, node="", max_items=10, extra_dict=None, profile_key=C.PROF_KEY_NONE):
+        """
+        @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit
+        """
+        max_items = None if max_items == C.NO_LIMIT else max_items
+        extra = self._p.parseExtra(extra_dict)
+        return self.mbGetLast(jid.JID(service_jid_s), node or None, max_items, extra.rsm_request, extra.extra, profile_key)
 
     @defer.inlineCallbacks
-    def getLastMicroblogs(self, pub_jid, max_items=10, profile_key=C.PROF_KEY_NONE):
+    def mbGetLast(self, service_jid, node=None, max_items=None, rsm_request=None, extra=None, profile_key=C.PROF_KEY_NONE):
         """Get the last published microblogs
 
-        @param pub_jid(jid.JID): jid of the publisher
-        @param max_items: how many microblogs we want to get
+        @param service_jid(jid.JID): jid of the publisher
+        @param node(unicode, None): node to get (or microblog node if None)
+        @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit
+        @param rsm_request (rsm.RSMRequest): RSM request data
         @param profile_key: profile key
 
         @return: a deferred couple with the list of items and metadatas.
         """
-        items_data = yield self._p.getItems(pub_jid, NS_MICROBLOG, max_items=max_items, profile_key=profile_key)
+        if node is None:
+            node = NS_MICROBLOG
+        items_data = yield self._p.getItems(service_jid, node, max_items=max_items, rsm_request=rsm_request, extra=extra, profile_key=profile_key)
         serialised = yield self._p.serItemsDataD(items_data, self.item2mbdata)
         defer.returnValue(serialised)
 
@@ -410,7 +537,7 @@
 
     ## configure ##
 
-    def setMicroblogAccess(self, access="presence", profile_key=C.PROF_KEY_NONE):
+    def mbSetAccess(self, access="presence", profile_key=C.PROF_KEY_NONE):
         """Create a microblog node on PEP with given access
 
         If the node already exists, it change options
@@ -491,17 +618,19 @@
             publishers[:] = [jid.JID(publisher) for publisher in publishers]
         return publishers_type, publishers
 
+
+
     # subscribe #
 
-    def _mBSubscribeToMany(self, publishers_type, publishers, profile_key):
+    def _mbSubscribeToMany(self, publishers_type, publishers, profile_key):
         """
 
         @return (str): session id: Use pubsub.getSubscribeRTResult to get the results
         """
         publishers_type, publishers = self._checkPublishers(publishers_type, publishers)
-        return self.mBSubscribeToMany(publishers_type, publishers, profile_key)
+        return self.mbSubscribeToMany(publishers_type, publishers, profile_key)
 
-    def mBSubscribeToMany(self, publishers_type, publishers, profile_key):
+    def mbSubscribeToMany(self, publishers_type, publishers, profile_key):
         """Subscribe microblogs for a list of groups or jids
 
         @param publishers_type: type of the list of publishers, one of:
@@ -517,8 +646,8 @@
 
     # get #
 
-    def _mBGetFromManyRTResult(self, session_id, profile_key=C.PROF_KEY_DEFAULT):
-        """Get real-time results for mBGetFromMany session
+    def _mbGetFromManyRTResult(self, session_id, profile_key=C.PROF_KEY_DEFAULT):
+        """Get real-time results for mbGetFromMany session
 
         @param session_id: id of the real-time deferred session
         @param return (tuple): (remaining, results) where:
@@ -527,7 +656,8 @@
                 - service (unicode): pubsub service
                 - node (unicode): pubsub node
                 - failure (unicode): empty string in case of success, error message else
-                - items_data(tuple): data tuple as returned by [getLastMicroblogs]
+                - items_data(list): data as returned by [mbGetLast]
+                - items_metadata(dict): metadata as returned by [mbGetLast]
         @param profile_key: %(doc_profile_key)s
         """
         def onSuccess(items_data):
@@ -546,35 +676,34 @@
                                     for (service, node), (success, (failure, (items, metadata))) in ret[1].iteritems()]))
         return d
 
-    def _mBGetFromMany(self, publishers_type, publishers, max_item=10, rsm_dict=None, profile_key=C.PROF_KEY_NONE):
+    def _mbGetFromMany(self, publishers_type, publishers, max_items=10, extra_dict=None, profile_key=C.PROF_KEY_NONE):
         """
-        @param max_item(int): maximum number of item to get, C.NO_LIMIT for no limit
+        @param max_items(int): maximum number of item to get, C.NO_LIMIT for no limit
         """
-        max_item = None if max_item == C.NO_LIMIT else max_item
+        max_items = None if max_items == C.NO_LIMIT else max_items
         publishers_type, publishers = self._checkPublishers(publishers_type, publishers)
-        return self.mBGetFromMany(publishers_type, publishers, max_item, rsm.RSMRequest(**rsm_dict) if rsm_dict else None, profile_key)
+        extra = self._p.parseExtra(extra_dict)
+        return self.mbGetFromMany(publishers_type, publishers, max_items, extra.rsm_request, extra.extra, profile_key)
 
-    def mBGetFromMany(self, publishers_type, publishers, max_item=None, rsm_data=None, profile_key=C.PROF_KEY_NONE):
+    def mbGetFromMany(self, publishers_type, publishers, max_items=None, rsm_request=None, extra=None, profile_key=C.PROF_KEY_NONE):
         """Get the published microblogs for a list of groups or jids
 
         @param publishers_type (str): type of the list of publishers (one of "GROUP" or "JID" or "ALL")
         @param publishers (list): list of publishers, according to publishers_type (list of groups or list of jids)
         @param max_items (int): optional limit on the number of retrieved items.
-        @param rsm_data (rsm.RSMRequest): RSM request data, common to all publishers
+        @param rsm_request (rsm.RSMRequest): RSM request data, common to all publishers
+        @param extra (dict): Extra data
         @param profile_key: profile key
-        @return: a deferred dict with:
-            - key: publisher (unicode)
-            - value: couple (list[dict], dict) with:
-                - the microblogs data
-                - RSM response data
+        @return (str): RT Deferred session id
         """
+        # XXX: extra is unused here so far
         client, node_data = self._getClientAndNodeData(publishers_type, publishers, profile_key)
-        return self._p.getFromMany(node_data, max_item, rsm_data, profile_key=profile_key)
+        return self._p.getFromMany(node_data, max_items, rsm_request, profile_key=profile_key)
 
     # comments #
 
-    def _mBGetFromManyWithCommentsRTResult(self, session_id, profile_key=C.PROF_KEY_DEFAULT):
-        """Get real-time results for [mBGetFromManyWithComments] session
+    def _mbGetFromManyWithCommentsRTResult(self, session_id, profile_key=C.PROF_KEY_DEFAULT):
+        """Get real-time results for [mbGetFromManyWithComments] session
 
         @param session_id: id of the real-time deferred session
         @param return (tuple): (remaining, results) where:
@@ -582,7 +711,6 @@
             - results is a list of tuple with
                 - service (unicode): pubsub service
                 - node (unicode): pubsub node
-                - success (bool): True if the getItems was successful
                 - failure (unicode): empty string in case of success, error message else
                 - items(list): list of items with:
                     - item(dict): item microblog data
@@ -602,20 +730,24 @@
                                     for (service, node), (success, (failure, (items, metadata))) in ret[1].iteritems()]))
         return d
 
-    def _mBGetFromManyWithComments(self, publishers_type, publishers, max_item=10, max_comments=C.NO_LIMIT, rsm_dict=None, rsm_comments_dict=None, profile_key=C.PROF_KEY_NONE):
+    def _mbGetFromManyWithComments(self, publishers_type, publishers, max_items=10, max_comments=C.NO_LIMIT, extra_dict=None, extra_comments_dict=None, profile_key=C.PROF_KEY_NONE):
         """
-        @param max_item(int): maximum number of item to get, C.NO_LIMIT for no limit
+        @param max_items(int): maximum number of items to get, C.NO_LIMIT for no limit
         @param max_comments(int): maximum number of comments to get, C.NO_LIMIT for no limit
         """
-        max_item = None if max_item == C.NO_LIMIT else max_item
+        max_items = None if max_items == C.NO_LIMIT else max_items
         max_comments = None if max_comments == C.NO_LIMIT else max_comments
         publishers_type, publishers = self._checkPublishers(publishers_type, publishers)
-        return self.mBGetFromManyWithComments(publishers_type, publishers, max_item, max_comments,
-                                              rsm.RSMRequest(**rsm_dict) if rsm_dict else None,
-                                              rsm.RSMRequest(**rsm_comments_dict) if rsm_comments_dict else None,
+        extra = self._p.parseExtra(extra_dict)
+        extra_comments = self._p.parseExtra(extra_comments_dict)
+        return self.mbGetFromManyWithComments(publishers_type, publishers, max_items, max_comments,
+                                              extra.rsm_request,
+                                              extra.extra,
+                                              extra_comments.rsm_request,
+                                              extra_comments.extra,
                                               profile_key)
 
-    def mBGetFromManyWithComments(self, publishers_type, publishers, max_item=None, max_comments=None, rsm_request=None, rsm_comments=None, profile_key=C.PROF_KEY_NONE):
+    def mbGetFromManyWithComments(self, publishers_type, publishers, max_items=None, max_comments=None, rsm_request=None, extra=None, rsm_comments=None, extra_comments=None, profile_key=C.PROF_KEY_NONE):
         """Helper method to get the microblogs and their comments in one shot
 
         @param publishers_type (str): type of the list of publishers (one of "GROUP" or "JID" or "ALL")
@@ -623,13 +755,11 @@
         @param max_items (int): optional limit on the number of retrieved items.
         @param max_comments (int): maximum number of comments to retrieve
         @param rsm_request (rsm.RSMRequest): RSM request for initial items only
+        @param extra (dict): extra configuration for initial items only
         @param rsm_comments (rsm.RSMRequest): RSM request for comments only
+        @param extra_comments (dict): extra configuration for comments only
         @param profile_key: profile key
-        @return: a deferred dict with:
-            - key: publisher (unicode)
-            - value: couple (list[dict], dict) with:
-                - the microblogs data
-                - RSM response data
+        @return (str): RT Deferred session id
         """
         # XXX: this method seems complicated because it do a couple of treatments
         #      to serialise and associate the data, but it make life in frontends side
@@ -653,30 +783,30 @@
                         service_s = value
                         node = item["{}{}".format(prefix, "_node")]
                         # time to get the comments
-                        d = self._p.getItems(jid.JID(service_s), node, max_comments, rsm_request=rsm_comments, profile_key=profile_key)
+                        d = self._p.getItems(jid.JID(service_s), node, max_comments, rsm_request=rsm_comments, extra=extra_comments, profile_key=profile_key)
                         # then serialise
                         d.addCallback(lambda items_data: self._p.serItemsDataD(items_data, self.item2mbdata))
                         # with failure handling
                         d.addCallback(lambda serialised_items_data: ('',) + serialised_items_data)
                         d.addErrback(lambda failure: (unicode(failure.value), [], {}))
                         # and associate with service/node (needed if there are several comments nodes)
-                        d.addCallback(lambda serialised: (service_s, node) + serialised)
+                        d.addCallback(lambda serialised, service_s=service_s, node=node: (service_s, node) + serialised)
                         dlist.append(d)
                 # we get the comments
                 comments_d = defer.gatherResults(dlist)
                 # and add them to the item data
-                comments_d.addCallback(lambda comments_data: (item, comments_data))
+                comments_d.addCallback(lambda comments_data, item=item: (item, comments_data))
                 items_dlist.append(comments_d)
             # we gather the items + comments in a list
             items_d = defer.gatherResults(items_dlist)
             # and add the metadata
-            items_d.addCallback(lambda items: (items, metadata))
+            items_d.addCallback(lambda items_completed: (items_completed, metadata))
             return items_d
 
         client, node_data = self._getClientAndNodeData(publishers_type, publishers, profile_key)
         deferreds = {}
         for service, node in node_data:
-            d = deferreds[(service, node)] = self._p.getItems(service, node, max_item, rsm_request=rsm_request, profile_key=profile_key)
+            d = deferreds[(service, node)] = self._p.getItems(service, node, max_items, rsm_request=rsm_request, extra=extra, profile_key=profile_key)
             d.addCallback(lambda items_data: self._p.serItemsDataD(items_data, self.item2mbdata))
             d.addCallback(getComments)
             d.addCallback(lambda items_comments_data: ('', items_comments_data))