changeset 4209:fe29fbdabce6

frontends (tools/webrtc): add options to merge videos for PiP and to specify the target size:
- the `merge_pip` option now indicates whether the local feedback and remote video streams must be merged, with the local feedback displayed as a PiP over the remote video. By default it is enabled for `SINKS_AUTO`, but it can be set explicitly to use it with `SINKS_APP`, or to split `SINKS_AUTO` into 2 separate windows.
- the target size of the compositor used with `merge_pip` can be specified. By default, 720p is used.
rel 428
author Goffi <goffi@goffi.org>
date Fri, 16 Feb 2024 18:46:02 +0100
parents fd9e78b7a0cd
children 9218d4331bb2
files libervia/frontends/tools/webrtc.py
diffstat 1 files changed, 79 insertions(+), 21 deletions(-)
--- a/libervia/frontends/tools/webrtc.py	Sun Feb 11 23:22:18 2024 +0100
+++ b/libervia/frontends/tools/webrtc.py	Fri Feb 16 18:46:02 2024 +0100
@@ -69,7 +69,7 @@
 @dataclass
 class AppSinkData:
     local_video_cb: Callable
-    remote_video_cb: Callable
+    remote_video_cb: Callable | None
 
 
 class DesktopPortal:
@@ -255,7 +255,29 @@
         sinks: str = SINKS_AUTO,
         appsink_data: AppSinkData | None = None,
         reset_cb: Callable | None = None,
+        merge_pip: bool | None = None,
+        target_size: tuple[int, int] | None = None,
     ) -> None:
+        """Initializes a new WebRTC instance.
+
+        @param bridge: An instance of the backend bridge.
+        @param profile: Libervia profile.
+        @param sources: Which kind of source to use.
+        @param sinks: Which kind of sinks to use.
+        @param appsink_data: Configuration data for the appsink (when SINKS_APP is
+            used). Must not be used with other sinks.
+        @param reset_cb: An optional Callable that is triggered on reset events. Can be
+            used to reset UI data on new calls.
+        @param merge_pip: A boolean flag indicating whether Picture-in-Picture mode is
+            enabled. When PiP is used, the local feedback is merged into the remote
+            video stream, and only one video stream is then produced (delivered
+            through the local video sink or callback).
+            If None, PiP mode is selected automatically according to the selected sink
+            (for now it is only enabled with SINKS_AUTO).
+        @param target_size: Expected size of the final sink stream. Mainly used by the
+            compositor when ``merge_pip`` is set.
+            None to autodetect (no real autodetection yet; defaults to (1280, 720)).
+        """
         self.main_loop = asyncio.get_event_loop()
         self.bridge = bridge
         self.profile = profile
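For reference, here is a minimal usage sketch of the two new parameters, assuming the class is exposed as webrtc.WebRTC and that a connected bridge and profile are already available (some_bridge and some_profile are placeholders for illustration, not part of this changeset):

    from libervia.frontends.tools import webrtc

    # Composite the local feedback as a PiP over the remote video, at 1080p instead
    # of the default 720p. With SINKS_AUTO, leaving merge_pip=None would enable the
    # merge anyway; passing merge_pip=False would keep two separate windows instead.
    call = webrtc.WebRTC(
        some_bridge,              # placeholder: backend bridge instance
        some_profile,             # placeholder: Libervia profile name
        sinks=webrtc.SINKS_AUTO,
        merge_pip=True,
        target_size=(1920, 1080),
    )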
@@ -266,7 +288,19 @@
         self.desktop_sharing_data = None
         self.sources = sources
         self.sinks = sinks
+        if target_size is None:
+            target_size = (1280, 720)
+        self.target_width, self.target_height = target_size
+        if merge_pip is None:
+            merge_pip = sinks == SINKS_AUTO
+        self.merge_pip = merge_pip
         if sinks == SINKS_APP:
+            if (
+                merge_pip
+                and appsink_data is not None
+                and appsink_data.remote_video_cb is not None
+            ):
+                raise ValueError("remote_video_cb can't be used when merge_pip is used!")
             self.appsink_data = appsink_data
         elif appsink_data is not None:
             raise exceptions.InternalError(
@@ -576,7 +610,6 @@
         else:
             raise exceptions.InternalError(f'Unknown "sources" value: {self.sources!r}')
 
-        extra_elt = ""
 
         if self.sinks == SINKS_APP:
             local_video_sink_elt = (
@@ -584,11 +617,21 @@
                 "sync=True"
             )
         elif self.sinks == SINKS_AUTO:
-            extra_elt = "compositor name=compositor ! autovideosink"
-            local_video_sink_elt = """compositor.sink_1"""
+            local_video_sink_elt = "autovideosink"
         else:
             raise exceptions.InternalError(f"Unknown sinks value {self.sinks!r}")
 
+        if self.merge_pip:
+            extra_elt = (
+                "compositor name=compositor background=black "
+                f"! video/x-raw,width={self.target_width},height={self.target_height},"
+                "framerate=30/1 "
+                f"! {local_video_sink_elt}"
+            )
+            local_video_sink_elt = "compositor.sink_1"
+        else:
+            extra_elt = ""
+
         self.gst_pipe_desc = f"""
         webrtcbin latency=100 name=sendrecv bundle-policy=max-bundle
 
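To illustrate the shape of the pipeline built when ``merge_pip`` is set, here is a self-contained sketch with test sources standing in for the real camera and remote streams; it mirrors the generated ``extra_elt`` and the pad configuration done later in the pad-added handler, but it is an illustration only, not the module's actual call pipeline:

    import gi

    gi.require_version("Gst", "1.0")
    from gi.repository import GLib, Gst

    Gst.init(None)
    # A compositor capped to the target size feeding a single sink; sink_1 is the
    # PiP overlay, sink_0 the full-size background stream.
    pipeline = Gst.parse_launch(
        "compositor name=compositor background=black "
        "! video/x-raw,width=1280,height=720,framerate=30/1 "
        "! autovideosink "
        "videotestsrc is-live=true ! video/x-raw,width=1280,height=720 "
        "! compositor.sink_0 "
        "videotestsrc is-live=true pattern=ball ! video/x-raw,width=426,height=240 "
        "! compositor.sink_1"
    )
    sink1_pad = pipeline.get_by_name("compositor").get_static_pad("sink_1")
    # bottom-right corner, drawn above sink_0, keeping the overlay's aspect ratio
    sink1_pad.set_property("xpos", 854)
    sink1_pad.set_property("ypos", 480)
    sink1_pad.set_property("zorder", 1)
    sink1_pad.set_property("sizing-policy", 1)  # 1 = keep-aspect-ratio
    pipeline.set_state(Gst.State.PLAYING)
    GLib.MainLoop().run()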
@@ -629,7 +672,11 @@
         log.debug(f"Gstreamer pipeline: {self.gst_pipe_desc}")
 
         # Create the pipeline
-        self.pipeline = Gst.parse_launch(self.gst_pipe_desc)
+        try:
+            self.pipeline = Gst.parse_launch(self.gst_pipe_desc)
+        except Exception:
+            log.exception("Can't parse pipeline")
+            self.pipeline = None
         if not self.pipeline:
             raise exceptions.InternalError("Failed to create Gstreamer pipeline.")
 
@@ -819,8 +866,8 @@
             self._remote_video_pad = pad
 
             # Check and log the original size of the video
-            width = s.get_int("width").value
-            height = s.get_int("height").value
+            width = self.target_width  # use the configured target size, not the caps size
+            height = self.target_height
             log.info(f"Original video size: {width}x{height}")
 
             # This is a fix for an issue found with Movim on desktop: a non standard
@@ -834,20 +881,8 @@
                 log.info(f"Adjusted video size: {width}x{height}")
 
             conv = Gst.ElementFactory.make("videoconvert")
-            if self.sinks == SINKS_APP:
-                assert self.appsink_data is not None
-                remote_video_sink = Gst.ElementFactory.make("appsink")
-
-                appsink_caps = Gst.Caps.from_string("video/x-raw,format=RGB")
-                remote_video_sink.set_property("caps", appsink_caps)
-
-                remote_video_sink.set_property("emit-signals", True)
-                remote_video_sink.set_property("drop", True)
-                remote_video_sink.set_property("max-buffers", 1)
-                remote_video_sink.set_property("sync", True)
-                remote_video_sink.connect("new-sample", self.appsink_data.remote_video_cb)
-                self.pipeline.add(remote_video_sink)
-            elif self.sinks == SINKS_AUTO:
+            if self.merge_pip:
+                # with ``merge_pip`` set, we plug the remote stream into the compositor
                 compositor = self.pipeline.get_by_name("compositor")
 
                 sink1_pad = compositor.get_static_pad("sink_1")
@@ -863,13 +898,36 @@
                 sink1_pad.set_property("ypos", height - local_height)
                 sink1_pad.set_property("width", local_width)
                 sink1_pad.set_property("height", local_height)
+                sink1_pad.set_property("sizing-policy", 1)  # 1 = keep-aspect-ratio
                 sink1_pad.set_property("zorder", 1)
 
                 # Request a new pad for the remote stream
                 sink_pad_template = compositor.get_pad_template("sink_%u")
                 remote_video_sink = compositor.request_pad(sink_pad_template, None, None)
                 remote_video_sink.set_property("zorder", 0)
+                remote_video_sink.set_property("width", width)
+                remote_video_sink.set_property("height", height)
+                remote_video_sink.set_property("sizing-policy", 1)  # 1 = keep-aspect-ratio
+            elif self.sinks == SINKS_APP:
+                # ``app`` sink without ``self.merge_pip`` set: we create the sink and
+                # connect it to ``remote_video_cb``.
+                assert self.appsink_data is not None
+                remote_video_sink = Gst.ElementFactory.make("appsink")
 
+                remote_video_caps = Gst.Caps.from_string("video/x-raw,format=RGB")
+                remote_video_sink.set_property("caps", remote_video_caps)
+
+                remote_video_sink.set_property("emit-signals", True)
+                remote_video_sink.set_property("drop", True)
+                remote_video_sink.set_property("max-buffers", 1)
+                remote_video_sink.set_property("sync", True)
+                remote_video_sink.connect("new-sample", self.appsink_data.remote_video_cb)
+                self.pipeline.add(remote_video_sink)
+            elif self.sinks == SINKS_AUTO:
+                # if ``self.merge_pip`` is not set, we create a dedicated
+                # ``autovideosink`` for the remote stream.
+                remote_video_sink = Gst.ElementFactory.make("autovideosink")
+                self.pipeline.add(remote_video_sink)
             else:
                 raise exceptions.InternalError(f'Unhandled "sinks" value: {self.sinks!r}')
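The local_width and local_height used for the sink_1 overlay above are computed outside the hunks shown here; purely as an illustration, a hypothetical helper deriving a bottom-right PiP geometry from the target size could look like this (the one-third ratio is an assumption, not necessarily what the module uses):

    # Hypothetical helper: bottom-right PiP overlay at one third of the target size.
    def pip_geometry(target_width: int, target_height: int) -> dict[str, int]:
        local_width = target_width // 3
        local_height = target_height // 3
        return {
            "xpos": target_width - local_width,
            "ypos": target_height - local_height,
            "width": local_width,
            "height": local_height,
            "zorder": 1,           # drawn above the remote stream (zorder 0)
            "sizing-policy": 1,    # 1 = keep-aspect-ratio
        }

    # With the default 720p target this yields a 426x240 overlay at (854, 480):
    #     for name, value in pip_geometry(1280, 720).items():
    #         sink1_pad.set_property(name, value)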