Mercurial > libervia-desktop-kivy
comparison libervia/desktop_kivy/plugins/plugin_wid_calls.py @ 506:0480f883f0a6
plugin calls: update UI:
- there is now a "search" UI to select a contact to call
- "call" UI is displayed only when we actually are in a call
- new control button to (un)mute audio and video
- new control button to go to fullscreen/back to normal
- add an extra "hang up" button directly in the call UI, so there is always one even in
fullscreen mode
- UI is similar to the one implemented in web frontend
- notification + ringtone + desktop notification on incoming call
- if an incoming call is cancelled by the initiator, the confirmation dialog is removed
rel 425
author: Goffi <goffi@goffi.org>
date: Wed, 25 Oct 2023 15:28:44 +0200
parents: f387992d8e37
children: f0ce49b360c8
comparison
505:bbef1a413515 (left) | 506:0480f883f0a6 (right)
1 from dataclasses import dataclass | 1 from dataclasses import dataclass |
2 from pathlib import Path | |
2 import re | 3 import re |
3 from typing import Optional, Callable | 4 from typing import Optional, Callable |
4 from urllib.parse import quote_plus | 5 from urllib.parse import quote_plus |
5 from functools import partial | 6 from functools import partial |
6 | 7 |
13 print( | 14 print( |
14 "no GStreamer python overrides available, please install relevant pacakges on " | 15 "no GStreamer python overrides available, please install relevant pacakges on " |
15 "your system." | 16 "your system." |
16 ) | 17 ) |
17 from kivy.clock import Clock | 18 from kivy.clock import Clock |
19 from kivy.core.audio import Sound, SoundLoader | |
20 from kivy.core.window import Window | |
18 from kivy.graphics.texture import Texture | 21 from kivy.graphics.texture import Texture |
19 from kivy.properties import BooleanProperty, ObjectProperty | 22 from kivy.properties import ( |
23 BooleanProperty, | |
24 ColorProperty, | |
25 NumericProperty, | |
26 ObjectProperty, | |
27 ReferenceListProperty, | |
28 ) | |
20 from kivy.support import install_gobject_iteration | 29 from kivy.support import install_gobject_iteration |
21 from kivy.uix.button import Button | 30 from kivy.uix.button import Button |
22 from kivy.uix.image import Image | 31 from kivy.uix.image import Image |
32 from kivy.uix.screenmanager import Screen | |
33 from kivy.uix.widget import Widget | |
23 from libervia.backend.core.constants import Const as C | 34 from libervia.backend.core.constants import Const as C |
24 from libervia.backend.core import log as logging | 35 from libervia.backend.core import log as logging |
25 from libervia.backend.core.i18n import _ | 36 from libervia.backend.core.i18n import _ |
26 from libervia.backend.core import exceptions | 37 from libervia.backend.core import exceptions |
27 from libervia.backend.tools.common import data_format | 38 from libervia.backend.tools.common import data_format |
29 from libervia.frontends.tools import jid, aio | 40 from libervia.frontends.tools import jid, aio |
30 | 41 |
31 from libervia.desktop_kivy import G | 42 from libervia.desktop_kivy import G |
32 | 43 |
33 from ..core import cagou_widget | 44 from ..core import cagou_widget |
45 from ..core import common | |
46 from ..core.behaviors import FilterBehavior | |
34 | 47 |
35 log = logging.getLogger(__name__) | 48 log = logging.getLogger(__name__) |
36 | 49 |
37 install_gobject_iteration() | 50 install_gobject_iteration() |
38 | 51 |
51 class TextureData: | 64 class TextureData: |
52 texture: Optional[Texture] = None | 65 texture: Optional[Texture] = None |
53 size: Optional[tuple[int, int]] = None | 66 size: Optional[tuple[int, int]] = None |
54 | 67 |
55 | 68 |
69 class SearchScreen(Screen): | |
70 pass | |
71 | |
72 | |
73 class InCallScreen(Screen): | |
74 pass | |
75 | |
76 | |
56 class CallButton(Button): | 77 class CallButton(Button): |
57 parent_widget = ObjectProperty(None) | 78 parent_widget = ObjectProperty(None) |
79 | |
80 | |
81 class CallControlButton(common.SymbolButton): | |
82 active = BooleanProperty(True) | |
83 background_color = ColorProperty() | |
84 margin_x = NumericProperty(0) | |
85 margin_y = NumericProperty(0) | |
86 margin = ReferenceListProperty(margin_x, margin_y) | |
58 | 87 |
59 | 88 |
60 class VideoStreamWidget(Image): | 89 class VideoStreamWidget(Image): |
61 pass | 90 pass |
62 | 91 |
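The new CallControlButton groups its margin_x/margin_y scalars into a single margin pair through ReferenceListProperty. A minimal, self-contained sketch of that Kivy pattern (the Margins class and the values below are illustrative, not part of the plugin):

    from kivy.event import EventDispatcher
    from kivy.properties import NumericProperty, ReferenceListProperty

    class Margins(EventDispatcher):
        # same pattern as CallControlButton.margin: two scalar properties
        # exposed (and observable) as one list-like property
        margin_x = NumericProperty(0)
        margin_y = NumericProperty(0)
        margin = ReferenceListProperty(margin_x, margin_y)

    m = Margins()
    m.margin = (16, 8)             # setting the pair updates both scalars
    print(m.margin_x, m.margin_y)  # -> 16 8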
69 | 98 |
70 @attribute test_mode: A flag to indicate whether the WebRTC instance is in test mode. | 99 @attribute test_mode: A flag to indicate whether the WebRTC instance is in test mode. |
71 If true, test video and audio sources will be used. Otherwise first webcam and | 100 If true, test video and audio sources will be used. Otherwise first webcam and |
72 microphone available will be used. | 101 microphone available will be used. |
73 """ | 102 """ |
103 | |
74 test_mode: bool = False | 104 test_mode: bool = False |
75 | |
76 | 105 |
77 def __init__(self, parent_calls: "Calls", profile: str) -> None: | 106 def __init__(self, parent_calls: "Calls", profile: str) -> None: |
78 self.parent_calls = parent_calls | 107 self.parent_calls = parent_calls |
79 self.profile = profile | 108 self.profile = profile |
80 self.pipeline = None | 109 self.pipeline = None |
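The test_mode attribute documented above selects between test generators and real capture devices when the pipeline is built. A tiny sketch of that kind of selection (the element names here are assumptions for illustration; of these, only pulsesrc and videotestsrc actually appear in this changeset):

    # hypothetical selection; "v4l2src" and "audiotestsrc" are assumed here,
    # the plugin's actual element choices are made in setup_call()
    test_mode = False
    video_source_elt = "videotestsrc is-live=true" if test_mode else "v4l2src"
    audio_source_elt = "audiotestsrc" if test_mode else "pulsesrc"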
262 "audio": {"candidates": []}, | 291 "audio": {"candidates": []}, |
263 "video": {"candidates": []}, | 292 "video": {"candidates": []}, |
264 } | 293 } |
265 self._media_types = None | 294 self._media_types = None |
266 self._media_types_inv = None | 295 self._media_types_inv = None |
296 self.audio_valve = None | |
297 self.video_valve = None | |
267 | 298 |
268 async def setup_call( | 299 async def setup_call( |
269 self, | 300 self, |
270 role: str, | 301 role: str, |
271 audio_pt: int | None = 96, | 302 audio_pt: int | None = 96, |
297 audio_source_elt = "pulsesrc" | 328 audio_source_elt = "pulsesrc" |
298 | 329 |
299 self.gst_pipe_desc = f""" | 330 self.gst_pipe_desc = f""" |
300 webrtcbin latency=100 name=sendrecv bundle-policy=max-compat | 331 webrtcbin latency=100 name=sendrecv bundle-policy=max-compat |
301 | 332 |
302 {video_source_elt} name=video_src | 333 input-selector name=video_selector |
303 ! videorate | 334 ! videorate |
304 ! video/x-raw,framerate=30/1 | 335 ! video/x-raw,framerate=30/1 |
305 ! tee name=t | 336 ! tee name=t |
337 | |
338 {video_source_elt} name=video_src ! queue ! video_selector. | |
339 videotestsrc is-live=true pattern=black ! queue ! video_selector. | |
306 | 340 |
307 t. | 341 t. |
308 ! queue max-size-buffers=5 max-size-time=0 max-size-bytes=0 leaky=downstream | 342 ! queue max-size-buffers=5 max-size-time=0 max-size-bytes=0 leaky=downstream |
309 ! videoconvert | 343 ! videoconvert |
310 ! vp8enc deadline=1 keyframe-max-dist=60 | 344 ! vp8enc deadline=1 keyframe-max-dist=60 |
316 ! queue max-size-buffers=5 max-size-time=0 max-size-bytes=0 | 350 ! queue max-size-buffers=5 max-size-time=0 max-size-bytes=0 |
317 ! videoconvert | 351 ! videoconvert |
318 ! appsink name=local_video_sink emit-signals=true drop=true max-buffers=1 sync=True | 352 ! appsink name=local_video_sink emit-signals=true drop=true max-buffers=1 sync=True |
319 | 353 |
320 {audio_source_elt} name=audio_src | 354 {audio_source_elt} name=audio_src |
355 ! valve name=audio_valve | |
321 ! queue max-size-buffers=10 max-size-time=0 max-size-bytes=0 | 356 ! queue max-size-buffers=10 max-size-time=0 max-size-bytes=0 |
322 ! audioconvert | 357 ! audioconvert |
323 ! audioresample | 358 ! audioresample |
324 ! opusenc audio-type=voice | 359 ! opusenc audio-type=voice |
325 ! rtpopuspay | 360 ! rtpopuspay |
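The reworked pipeline feeds the camera and a black videotestsrc into an input-selector, so video can later be "muted" by switching the selector's active pad instead of tearing the pipeline down. A standalone sketch of that topology (the ball test pattern and display sink are illustrative stand-ins; pad names follow the sink_%u template used by input-selector):

    import gi
    gi.require_version("Gst", "1.0")
    from gi.repository import Gst

    Gst.init(None)
    # the "ball" source stands in for the real camera, the black source is the
    # mute placeholder; parse_launch requests sink_0 then sink_1 on the selector
    pipeline = Gst.parse_launch(
        "input-selector name=video_selector ! videoconvert ! autovideosink "
        "videotestsrc is-live=true pattern=ball ! queue ! video_selector. "
        "videotestsrc is-live=true pattern=black ! queue ! video_selector. "
    )
    selector = pipeline.get_by_name("video_selector")
    pipeline.set_state(Gst.State.PLAYING)
    # switch to the black source ("mute"), then back to the camera
    selector.props.active_pad = selector.get_static_pad("sink_1")
    selector.props.active_pad = selector.get_static_pad("sink_0")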
333 self.pipeline = Gst.parse_launch(self.gst_pipe_desc) | 368 self.pipeline = Gst.parse_launch(self.gst_pipe_desc) |
334 if not self.pipeline: | 369 if not self.pipeline: |
335 raise exceptions.InternalError("Failed to create Gstreamer pipeline.") | 370 raise exceptions.InternalError("Failed to create Gstreamer pipeline.") |
336 | 371 |
337 self.webrtc = self.pipeline.get_by_name("sendrecv") | 372 self.webrtc = self.pipeline.get_by_name("sendrecv") |
338 | |
339 self.video_src = self.pipeline.get_by_name("video_src") | 373 self.video_src = self.pipeline.get_by_name("video_src") |
340 self.audio_src = self.pipeline.get_by_name("audio_src") | 374 self.video_selector = self.pipeline.get_by_name("video_selector") |
375 self.audio_valve = self.pipeline.get_by_name("audio_valve") | |
376 | |
377 if self.parent_calls.video_muted: | |
378 self.on_video_mute(True) | |
379 if self.parent_calls.audio_muted: | |
380 self.on_audio_mute(True) | |
341 | 381 |
342 # set STUN and TURN servers | 382 # set STUN and TURN servers |
343 external_disco = data_format.deserialise( | 383 external_disco = data_format.deserialise( |
344 await G.host.a_bridge.external_disco_get("", self.profile), type_check=list | 384 await G.host.a_bridge.external_disco_get("", self.profile), type_check=list |
345 ) | 385 ) |
353 url = f"stun://{server['host']}:{server['port']}" | 393 url = f"stun://{server['host']}:{server['port']}" |
354 log.debug(f"adding stun server: {url}") | 394 log.debug(f"adding stun server: {url}") |
355 self.webrtc.set_property("stun-server", url) | 395 self.webrtc.set_property("stun-server", url) |
356 elif server["type"] == "turn": | 396 elif server["type"] == "turn": |
357 url = "{scheme}://{username}:{password}@{host}:{port}".format( | 397 url = "{scheme}://{username}:{password}@{host}:{port}".format( |
358 scheme = "turns" if server["transport"] == "tcp" else "turn", | 398 scheme="turns" if server["transport"] == "tcp" else "turn", |
359 username=quote_plus(server["username"]), | 399 username=quote_plus(server["username"]), |
360 password=quote_plus(server["password"]), | 400 password=quote_plus(server["password"]), |
361 host=server["host"], | 401 host=server["host"], |
362 port=server["port"], | 402 port=server["port"], |
363 ) | 403 ) |
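The TURN branch percent-encodes the credentials with quote_plus before embedding them in the URI handed to webrtcbin. A small sketch with a made-up server entry (the dict layout mirrors what the loop above reads from external_disco_get):

    from urllib.parse import quote_plus

    # hypothetical entry; field names taken from the loop above
    server = {
        "type": "turn",
        "transport": "tcp",
        "host": "turn.example.org",
        "port": "3478",
        "username": "user@example.org",
        "password": "s3cret!",
    }
    url = "{scheme}://{username}:{password}@{host}:{port}".format(
        scheme="turns" if server["transport"] == "tcp" else "turn",
        username=quote_plus(server["username"]),
        password=quote_plus(server["password"]),
        host=server["host"],
        port=server["port"],
    )
    print(url)  # turns://user%40example.org:s3cret%21@turn.example.org:3478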
606 self.parent_calls.remote_video, | 646 self.parent_calls.remote_video, |
607 ) | 647 ) |
608 | 648 |
609 if adjust_resolution: | 649 if adjust_resolution: |
610 videoscale = Gst.ElementFactory.make("videoscale") | 650 videoscale = Gst.ElementFactory.make("videoscale") |
611 adjusted_caps = Gst.Caps.from_string(f"video/x-raw,width={width},height={height}") | 651 adjusted_caps = Gst.Caps.from_string( |
652 f"video/x-raw,width={width},height={height}" | |
653 ) | |
612 capsfilter = Gst.ElementFactory.make("capsfilter") | 654 capsfilter = Gst.ElementFactory.make("capsfilter") |
613 capsfilter.set_property("caps", adjusted_caps) | 655 capsfilter.set_property("caps", adjusted_caps) |
614 | 656 |
615 self.pipeline.add(q, conv, videoscale, capsfilter, remote_video_sink) | 657 self.pipeline.add(q, conv, videoscale, capsfilter, remote_video_sink) |
616 self.pipeline.sync_children_states() | 658 self.pipeline.sync_children_states() |
822 @param bus: The GStreamer bus. | 864 @param bus: The GStreamer bus. |
823 @param message: The eos message. | 865 @param message: The eos message. |
824 """ | 866 """ |
825 log.info("End of stream") | 867 log.info("End of stream") |
826 | 868 |
869 def on_audio_mute(self, muted: bool) -> None: | |
870 if self.audio_valve is not None: | |
871 self.audio_valve.set_property("drop", muted) | |
872 state = "muted" if muted else "unmuted" | |
873 log.info(f"audio is now {state}") | |
874 | |
875 def on_video_mute(self, muted: bool) -> None: | |
876 if self.video_selector is not None: | |
877 # when muted, we switch to a black image and deactivate the camera | |
878 if not muted: | |
879 self.video_src.set_state(Gst.State.PLAYING) | |
880 pad = self.video_selector.get_static_pad("sink_1" if muted else "sink_0") | |
881 self.video_selector.props.active_pad = pad | |
882 if muted: | |
883 self.video_src.set_state(Gst.State.NULL) | |
884 state = "muted" if muted else "unmuted" | |
885 log.info(f"video is now {state}") | |
886 | |
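on_audio_mute() relies on the valve element added to the audio branch: setting its drop property discards buffers without stopping the source. A standalone sketch of the same mechanism (the test source and sink are illustrative, not the plugin's real audio chain):

    import gi
    gi.require_version("Gst", "1.0")
    from gi.repository import Gst

    Gst.init(None)
    pipeline = Gst.parse_launch(
        "audiotestsrc is-live=true ! valve name=audio_valve "
        "! audioconvert ! autoaudiosink"
    )
    valve = pipeline.get_by_name("audio_valve")
    pipeline.set_state(Gst.State.PLAYING)
    valve.set_property("drop", True)   # mute: the valve drops every buffer
    valve.set_property("drop", False)  # unmute: buffers flow again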
827 async def end_call(self) -> None: | 887 async def end_call(self) -> None: |
828 """Stop streaming and clean instance""" | 888 """Stop streaming and clean instance""" |
829 self.reset_instance() | 889 self.reset_instance() |
830 | 890 |
831 | 891 |
832 class Calls(quick_widgets.QuickWidget, cagou_widget.LiberviaDesktopKivyWidget): | 892 class Calls( |
893 quick_widgets.QuickWidget, | |
894 cagou_widget.LiberviaDesktopKivyWidget, | |
895 FilterBehavior | |
896 ): | |
897 audio_muted = BooleanProperty(False) | |
898 call_layout = ObjectProperty() | |
899 call_screen = ObjectProperty() | |
900 fullscreen = BooleanProperty(False) | |
901 in_call = BooleanProperty(False) | |
902 incoming_call_dialog: dict[str, Widget] = {} | |
903 jid_selector = ObjectProperty() | |
904 local_video = ObjectProperty() | |
833 remote_video = ObjectProperty() | 905 remote_video = ObjectProperty() |
834 local_video = ObjectProperty() | 906 ringtone: Sound | None = None |
907 screen_manager = ObjectProperty() | |
908 signals_registered = False | |
835 use_header_input = True | 909 use_header_input = True |
836 signals_registered = False | 910 video_muted = BooleanProperty(False) |
837 in_call = BooleanProperty(False) | |
838 | 911 |
839 def __init__(self, host, target, profiles): | 912 def __init__(self, host, target, profiles): |
840 quick_widgets.QuickWidget.__init__(self, G.host, target, profiles) | 913 quick_widgets.QuickWidget.__init__(self, G.host, target, profiles) |
841 cagou_widget.LiberviaDesktopKivyWidget.__init__(self) | 914 cagou_widget.LiberviaDesktopKivyWidget.__init__(self) |
842 call_btn = CallButton( | 915 call_btn = CallButton( |
843 parent_widget=self, on_press=lambda *__: aio.run_async(self.toggle_call()) | 916 parent_widget=self, on_press=lambda *__: aio.run_async(self.toggle_call()) |
844 ) | 917 ) |
845 self.header_input_add_extra(call_btn) | 918 self.header_input_add_extra(call_btn) |
846 self.webrtc = WebRTC(self, self.profile) | 919 self.webrtc = WebRTC(self, self.profile) |
847 | 920 self.previous_fullscreen = None |
848 if not self.__class__.signals_registered: | 921 self.bind( |
849 log.debug("registering signals") | 922 audio_muted=lambda __, value: self.webrtc.on_audio_mute(value), |
850 G.host.register_signal( | 923 video_muted=lambda __, value: self.webrtc.on_video_mute(value), |
851 "ice_candidates_new", | 924 ) |
852 handler=self.__class__.ice_candidates_new_handler, | |
853 iface="plugin", | |
854 ) | |
855 G.host.register_signal( | |
856 "call_setup", handler=self.__class__.call_setup_handler, iface="plugin" | |
857 ) | |
858 G.host.register_signal( | |
859 "call_ended", handler=self.__class__.call_ended_handler, iface="plugin" | |
860 ) | |
861 G.host.register_action_handler( | |
862 C.META_TYPE_CALL, self.__class__.action_new_handler | |
863 ) | |
864 self.__class__.signals_registered = True | |
865 | |
866 self.reset_instance() | 925 self.reset_instance() |
867 | 926 |
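The bind() call in __init__ above is what connects the UI state to WebRTC: flipping audio_muted or video_muted from the control buttons triggers the corresponding mute handler. A minimal, widget-free sketch of that observer pattern (the MuteState class and print callback are illustrative):

    from kivy.event import EventDispatcher
    from kivy.properties import BooleanProperty

    class MuteState(EventDispatcher):
        audio_muted = BooleanProperty(False)

    state = MuteState()
    # stand-in for webrtc.on_audio_mute(value)
    state.bind(audio_muted=lambda __, value: print(f"audio muted: {value}"))
    state.audio_muted = True   # the bound callback fires with value=True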
868 @property | 927 @property |
869 def sid(self): | 928 def sid(self): |
870 return self.webrtc.sid | 929 return self.webrtc.sid |
930 | |
931 def hang_up(self): | |
932 if self.sid is not None: | |
933 aio.run_async(self.toggle_call()) | |
871 | 934 |
872 async def toggle_call(self): | 935 async def toggle_call(self): |
873 """Toggle between making a call and hanging up. | 936 """Toggle between making a call and hanging up. |
874 | 937 |
875 This function will initiate or terminate a call based on the call state. | 938 This function will initiate or terminate a call based on the call state. |
876 """ | 939 """ |
877 if self.sid is None: | 940 if self.sid is None: |
878 # Initiate the call | 941 # Initiate the call |
879 log.info("initiating call") | 942 log.info("initiating call") |
880 callee = jid.JID(self.header_input.text.strip()) | 943 callee = jid.JID(self.header_input.text.strip()) |
944 if not callee: | |
945 return | |
946 if not callee.is_valid(): | |
947 G.host.add_note( | |
948 _("Calls"), | |
949 _("Can't make a call: invalid destinee {}").format(repr(callee)), | |
950 level=C.XMLUI_DATA_LVL_ERROR | |
951 ) | |
952 return | |
953 | |
881 self.webrtc.callee = callee | 954 self.webrtc.callee = callee |
882 await self.webrtc.setup_call("initiator") | 955 await self.webrtc.setup_call("initiator") |
883 self.webrtc.start_pipeline() | 956 self.webrtc.start_pipeline() |
884 self.in_call = True | 957 self.in_call = True |
885 else: | 958 else: |
945 @param data: end call data, often includes the reason of the call ending. | 1018 @param data: end call data, often includes the reason of the call ending. |
946 """ | 1019 """ |
947 await self.webrtc.end_call() | 1020 await self.webrtc.end_call() |
948 self.reset_instance() | 1021 self.reset_instance() |
949 | 1022 |
1023 def on_in_call(self, instance, in_call: bool) -> None: | |
1024 if in_call: | |
1025 self.screen_manager.transition.direction = "up" | |
1026 self.screen_manager.current = "call" | |
1027 else: | |
1028 self.fullscreen = False | |
1029 self.screen_manager.transition.direction = "down" | |
1030 self.screen_manager.current = "search" | |
1031 | |
1032 def on_fullscreen(self, instance, fullscreen: bool) -> None: | |
1033 if fullscreen: | |
1034 G.host.app.show_head_widget(False, animation=False) | |
1035 self.call_layout.parent.remove_widget(self.call_layout) | |
1036 G.host.show_extra_ui(self.call_layout) | |
1037 self.previous_fullscreen = Window.fullscreen | |
1038 Window.fullscreen = "auto" | |
1039 else: | |
1040 G.host.app.show_head_widget(True, animation=False) | |
1041 G.host.close_ui() | |
1042 self.call_screen.add_widget(self.call_layout) | |
1043 Window.fullscreen = self.previous_fullscreen or False | |
1044 | |
1045 def on_header_wid_input(self): | |
1046 aio.run_async(self.toggle_call()) | |
1047 | |
1048 def on_header_wid_input_complete(self, wid, text, **kwargs): | |
1049 """we filter items when text is entered in input box""" | |
1050 for layout in self.jid_selector.items_layouts: | |
1051 self.do_filter( | |
1052 layout, | |
1053 text, | |
1054 # we append nick to jid to filter on both | |
1055 lambda c: c.jid + c.data.get('nick', ''), | |
1056 width_cb=lambda c: c.base_width, | |
1057 height_cb=lambda c: c.minimum_height, | |
1058 continue_tests=[lambda c: not isinstance(c, common.ContactButton)]) | |
1059 | |
1060 def on_jid_select(self, contact_button): | |
1061 self.header_input.text = contact_button.jid | |
1062 aio.run_async(self.toggle_call()) | |
1063 | |
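on_in_call() drives the new two-screen layout: the ScreenManager slides up to the "call" screen when a call starts and back down to "search" when it ends. A minimal sketch of that Kivy mechanism outside the plugin (screen names match the ones defined above; a real application would run this inside the Kivy event loop so the slide actually animates):

    from kivy.uix.screenmanager import ScreenManager, Screen

    sm = ScreenManager()
    sm.add_widget(Screen(name="search"))
    sm.add_widget(Screen(name="call"))

    # call started: slide up into the call UI
    sm.transition.direction = "up"
    sm.current = "call"

    # call ended: slide back down to the contact search
    sm.transition.direction = "down"
    sm.current = "search"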
950 @classmethod | 1064 @classmethod |
951 def ice_candidates_new_handler( | 1065 def ice_candidates_new_handler( |
952 cls, sid: str, candidates_s: str, profile: str | 1066 cls, sid: str, candidates_s: str, profile: str |
953 ) -> None: | 1067 ) -> None: |
954 for wid in G.host.get_visible_list(cls): | 1068 for wid in G.host.get_visible_list(cls): |
967 wid.on_call_setup(data_format.deserialise(setup_data_s), profile) | 1081 wid.on_call_setup(data_format.deserialise(setup_data_s), profile) |
968 ) | 1082 ) |
969 | 1083 |
970 @classmethod | 1084 @classmethod |
971 def call_ended_handler(cls, sid: str, data_s: str, profile: str) -> None: | 1085 def call_ended_handler(cls, sid: str, data_s: str, profile: str) -> None: |
1086 if sid in cls.incoming_call_dialog: | |
1087 dialog_wid = cls.incoming_call_dialog.pop(sid) | |
1088 G.host.del_notif_widget(dialog_wid) | |
1089 G.host.add_note(_("Call cancelled"), _("The call has been cancelled.")) | |
1090 | |
1091 | |
972 for wid in G.host.get_visible_list(cls): | 1092 for wid in G.host.get_visible_list(cls): |
973 if profile not in wid.profiles or sid != wid.sid: | 1093 if profile not in wid.profiles or sid != wid.sid: |
974 continue | 1094 continue |
975 aio.run_async(wid.end_call(data_format.deserialise(data_s), profile)) | 1095 aio.run_async(wid.end_call(data_format.deserialise(data_s), profile)) |
976 | 1096 |
977 @classmethod | 1097 @classmethod |
978 def action_new_handler( | 1098 def action_new_handler( |
979 cls, action_data: dict, action_id: str, security_limit: int, profile: str | 1099 cls, action_data: dict, action_id: str, security_limit: int, profile: str |
980 ) -> None: | 1100 ) -> None: |
981 for wid in G.host.get_visible_list(cls): | 1101 if profile in G.host.profiles: |
982 if profile not in wid.profiles: | 1102 if cls.ringtone is None: |
983 continue | 1103 cls.ringtone = SoundLoader.load( |
984 aio.run_async(wid.on_remote_call(action_data, action_id, profile)) | 1104 str(Path(G.host.media_dir) / "sounds/notifications/ring_1.mp3") |
1105 ) | |
1106 if cls.ringtone is not None: | |
1107 cls.ringtone.play() | |
1108 peer_jid = jid.JID(action_data["from_jid"]).bare | |
1109 sid = action_data["session_id"] | |
1110 notif_body = f"{peer_jid} is calling you." | |
1111 notif_title = "Incoming call" | |
1112 G.host.desktop_notif(notif_body, title=notif_title, duration=10) | |
1113 | |
1114 def on_call_answer(accepted, __): | |
1115 del cls.incoming_call_dialog[sid] | |
1116 if cls.ringtone is not None: | |
1117 cls.ringtone.stop() | |
1118 if accepted: | |
1119 wid = G.host.do_action("calls", str(peer_jid), [profile]) | |
1120 aio.run_async(wid.on_incoming_call(action_data, action_id, profile)) | |
1121 else: | |
1122 aio.run_async( | |
1123 G.host.a_bridge.action_launch( | |
1124 action_id, data_format.serialise({"cancelled": True}), profile | |
1125 ) | |
1126 ) | |
1127 | |
1128 dialog_wid = G.host.show_dialog( | |
1129 notif_body, notif_title, type="yes/no", answer_cb=on_call_answer | |
1130 ) | |
1131 cls.incoming_call_dialog[sid] = dialog_wid | |
1132 | |
1133 | |
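The incoming-call handler above lazily loads the ringtone with Kivy's SoundLoader and keeps it on the class so the answer callback can stop it. A short sketch of that API (the path is hypothetical; in the plugin it is built from G.host.media_dir):

    from pathlib import Path
    from kivy.core.audio import SoundLoader

    ring_path = Path("/usr/share/libervia/media") / "sounds/notifications/ring_1.mp3"
    ringtone = SoundLoader.load(str(ring_path))
    if ringtone is not None:  # load() returns None if no audio backend handles the file
        ringtone.play()
        # ... once the call is accepted or rejected ...
        ringtone.stop()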
1134 if G.host is not None: | |
1135 log.debug("registering signals") | |
1136 G.host.register_signal( | |
1137 "ice_candidates_new", | |
1138 handler=Calls.ice_candidates_new_handler, | |
1139 iface="plugin", | |
1140 ) | |
1141 G.host.register_signal("call_setup", handler=Calls.call_setup_handler, iface="plugin") | |
1142 G.host.register_signal("call_ended", handler=Calls.call_ended_handler, iface="plugin") | |
1143 G.host.register_action_handler(C.META_TYPE_CALL, Calls.action_new_handler) |