libervia-web: changeset 1553:83c2a6faa2ae
browser (calls): screen sharing implementation:
- the new screen sharing button toggles the screen sharing state
- the button reflects the screen sharing state (green and crossed out when not sharing, red and uncrossed when sharing)
- the screen sharing stream replaces the camera one, and vice versa; no renegotiation is needed (see the condensed sketch below)
- stopping the sharing through the browser's own dialog is supported
- the screen sharing button is only visible if screen sharing is supported by the platform
rel 432
author:   Goffi <goffi@goffi.org>
date:     Mon, 14 Aug 2023 16:49:02 +0200
parents:  c62027660ec1
children: 1dc23ba67f47
files:    libervia/web/pages/calls/_browser/__init__.py libervia/web/pages/calls/_browser/webrtc.py
diffstat: 2 files changed, 222 insertions(+), 150 deletions(-)
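The renegotiation-free camera/screen swap described in the commit message works by replacing the track carried by the existing RTCRtpSender instead of adding a new one, and by listening for the screen track's "ended" event so that the browser's own "stop sharing" dialog falls back to the camera. Below is a condensed, illustrative Brython sketch of that approach; the helper name `swap_video` and the simplified constraints are not part of the patch, whose real implementation is `_replace_user_video()` in the webrtc.py diff further down.

```python
# Condensed sketch of the track-replacement approach; simplified from
# `_replace_user_video()` in the diff below, error handling omitted.
from browser import aio, window


async def swap_video(peer_connection, local_video_elt, screen: bool):
    """Swap the outgoing video between camera and screen without renegotiation."""
    if screen:
        # getDisplayMedia() prompts the user to pick a screen or window.
        stream = await window.navigator.mediaDevices.getDisplayMedia({"video": True})
    else:
        stream = await window.navigator.mediaDevices.getUserMedia({"video": True})
    new_track = stream.getVideoTracks()[0]

    # Find the sender currently carrying video and replace its track in place:
    # the SDP does not change, so no new offer/answer round is needed.
    video_sender = next(
        (s for s in peer_connection.getSenders()
         if s.track and s.track.kind == "video"),
        None,
    )
    if video_sender is not None:
        await video_sender.replaceTrack(new_track)
    local_video_elt.srcObject = stream

    if screen:
        # The browser's "stop sharing" dialog fires "ended" on the screen track;
        # binding it switches back to the camera automatically.
        new_track.bind(
            "ended",
            lambda __: aio.run(swap_video(peer_connection, local_video_elt, False)),
        )
```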
--- a/libervia/web/pages/calls/_browser/__init__.py	Wed Aug 09 00:48:21 2023 +0200
+++ b/libervia/web/pages/calls/_browser/__init__.py	Mon Aug 14 16:49:02 2023 +0200
@@ -15,15 +15,18 @@
 bridge = Bridge()
 GATHER_TIMEOUT = 10000
 ALLOWED_STATUSES = (None, "dialing")
-AUDIO = 'audio'
-VIDEO = 'video'
+AUDIO = "audio"
+VIDEO = "video"
 ALLOWED_CALL_MODES = {AUDIO, VIDEO}
+INACTIVE_CLASS = "inactive"
+MUTED_CLASS = "muted"
+SCREEN_OFF_CLASS = "screen-off"


 class CallUI:
-
     def __init__(self):
         self.webrtc = WebRTC()
+        self.webrtc.screen_sharing_cb = self.on_sharing_screen
         self.mode = "search"
         self._status = None
         self._callee = None
@@ -44,42 +47,34 @@
         self._call_mode = VIDEO
         self.call_button_tpl = Template("call/call_button.html")
         self._update_call_button()
-        document['toggle_call_mode_btn'].bind('click', self.switch_call_mode)
-        document["hangup_btn"].bind(
-            "click",
-            lambda __: aio.run(self.hang_up())
-        )
+        document["toggle_call_mode_btn"].bind("click", self.switch_call_mode)
+        document["hangup_btn"].bind("click", lambda __: aio.run(self.hang_up()))

         # other buttons
-        document["full_screen_btn"].bind(
-            "click",
-            lambda __: self.toggle_fullscreen()
-        )
+        document["full_screen_btn"].bind("click", lambda __: self.toggle_fullscreen())
         document["exit_full_screen_btn"].bind(
-            "click",
-            lambda __: self.toggle_fullscreen()
+            "click", lambda __: self.toggle_fullscreen()
         )
-        document["mute_audio_btn"].bind(
-            "click",
-            self.toggle_audio_mute
-        )
-        document["mute_video_btn"].bind(
-            "click",
-            self.toggle_video_mute
-        )
+        document["mute_audio_btn"].bind("click", self.toggle_audio_mute)
+        document["mute_video_btn"].bind("click", self.toggle_video_mute)
+        self.share_desktop_btn_elt = document["share_desktop_btn"]
+        if hasattr(window.navigator.mediaDevices, "getDisplayMedia"):
+            self.share_desktop_btn_elt.classList.remove("is-hidden-touch")
+            # screen sharing is supported
+            self.share_desktop_btn_elt.bind("click", self.toggle_screen_sharing)
+        else:
+            self.share_desktop_btn_elt.classList.add("is-hidden")

         # search
         self.jid_search = JidSearch(
-            document["search"],
-            document["contacts"],
-            click_cb = self._on_entity_click
+            document["search"], document["contacts"], click_cb=self._on_entity_click
         )

         # incoming call dialog
         self.incoming_call_dialog_elt = None

     @property
-    def sid(self) -> str|None:
+    def sid(self) -> str | None:
         return self.webrtc.sid

     @sid.setter
@@ -97,10 +92,7 @@
             raise Exception(
                 f"INTERNAL ERROR: this status is not allowed: {new_status!r}"
             )
-        tpl_data = {
-            "entity": self._callee,
-            "status": new_status
-        }
+        tpl_data = {"entity": self._callee, "status": new_status}
         if self._callee is not None:
             try:
                 tpl_data["name"] = cache.identities[self._callee]["nicknames"][0]
@@ -110,7 +102,6 @@

         self.call_status_wrapper_elt.clear()
         self.call_status_wrapper_elt <= status_elt
-
         self._status = new_status

     @property
@@ -139,10 +130,9 @@
     def _update_call_button(self):
         new_button = self.call_button_tpl.get_elt({"call_mode": self.call_mode})
         new_button.bind(
-            "click",
-            lambda __: aio.run(self.make_call(video=not self.call_mode == AUDIO))
+            "click", lambda __: aio.run(self.make_call(video=not self.call_mode == AUDIO))
         )
-        document['call_btn'].replaceWith(new_button)
+        document["call_btn"].replaceWith(new_button)

     def _on_action_new(
         self, action_data_s: str, action_id: str, security_limit: int, profile: str
@@ -161,9 +151,7 @@

     async def on_action_new(self, action_data: dict, action_id: str) -> None:
         peer_jid = action_data["from_jid"]
-        log.info(
-            f"{peer_jid} wants to start a call ({action_data['sub_type']})"
-        )
+        log.info(f"{peer_jid} wants to start a call ({action_data['sub_type']})")
         if self.sid is not None:
             log.warning(
                 f"already in a call ({self.sid}), can't receive a new call from "
@@ -173,7 +161,7 @@
         sid = self.sid = action_data["session_id"]
         await cache.fill_identities([peer_jid])
         identity = cache.identities[peer_jid]
-        peer_name = identity['nicknames'][0]
+        peer_name = identity["nicknames"][0]

         # we start the ring
         self.audio_player_elt.play()
@@ -181,19 +169,14 @@
         # and ask user if we take the call
         try:
             self.incoming_call_dialog_elt = dialog.Confirm(
-                f"{peer_name} is calling you.",
-                ok_label="Answer",
-                cancel_label="Reject"
+                f"{peer_name} is calling you.", ok_label="Answer", cancel_label="Reject"
             )
             accepted = await self.incoming_call_dialog_elt.ashow()
         except dialog.CancelError:
             log.info("Call has been cancelled")
             self.incoming_call_dialog_elt = None
             self.sid = None
-            dialog.notification.show(
-                f"{peer_name} has cancelled the call",
-                level="info"
-            )
+            dialog.notification.show(f"{peer_name} has cancelled the call", level="info")
             return

         self.incoming_call_dialog_elt = None
@@ -210,10 +193,7 @@
         else:
             log.info(f"your are declining the call from {peer_jid}")
             self.sid = None
-        await bridge.action_launch(
-            action_id,
-            json.dumps({"cancelled": not accepted})
-        )
+        await bridge.action_launch(action_id, json.dumps({"cancelled": not accepted}))

     def _on_call_ended(self, session_id: str, data_s: str, profile: str) -> None:
         """Call has been terminated
@@ -241,7 +221,9 @@
         """
         aio.run(self.on_call_setup(session_id, json.loads(setup_data_s), profile))

-    async def on_call_setup(self, session_id: str, setup_data: dict, profile: str) -> None:
+    async def on_call_setup(
+        self, session_id: str, setup_data: dict, profile: str
+    ) -> None:
         """Call has been accepted, connection can be established

         @param session_id: Session identifier
@@ -260,8 +242,7 @@
             sdp = setup_data["sdp"]
         except KeyError:
             dialog.notification.show(
-                f"Invalid setup data received: {setup_data}",
-                level="error"
+                f"Invalid setup data received: {setup_data}", level="error"
             )
             return
         if role == "initiator":
@@ -270,8 +251,7 @@
             await self.webrtc.answer_call(session_id, sdp, profile)
         else:
             dialog.notification.show(
-                f"Invalid role received during setup: {setup_data}",
-                level="error"
+                f"Invalid role received during setup: {setup_data}", level="error"
             )
             return

@@ -287,22 +267,22 @@
                 raise ValueError
         except ValueError:
             dialog.notification.show(
-                "Invalid identifier, please use a valid callee identifier",
-                level="error"
+                "Invalid identifier, please use a valid callee identifier", level="error"
            )
            return

         self._callee = callee_jid
         await cache.fill_identities([callee_jid])
         self.status = "dialing"

-        call_avatar_elt = self.call_avatar_tpl.get_elt({
-            "entity": str(callee_jid),
-            "identities": cache.identities,
-        })
+        call_avatar_elt = self.call_avatar_tpl.get_elt(
+            {
+                "entity": str(callee_jid),
+                "identities": cache.identities,
+            }
+        )
         self.call_avatar_wrapper_elt.clear()
         self.call_avatar_wrapper_elt <= call_avatar_elt
-
         self.switch_mode("call")
         await self.webrtc.make_call(callee_jid, audio, video)

@@ -335,16 +315,13 @@
             log.warning("Can't hand_up, not call in progress")
             return
         await self.end_call({"reason": "terminated"})
-        await bridge.call_end(
-            session_id,
-            ""
-        )
+        await bridge.call_end(session_id, "")

     def _handle_animation_end(
         self,
         element,
-        remove = None,
-        add = None,
+        remove=None,
+        add=None,
     ):
         """Return a handler that removes specified classes and the event handler.
@@ -352,6 +329,7 @@
         @param remove: List of class names to remove from the element.
         @param add: List of class names to add to the element.
         """
+
         def handler(__, remove=remove, add=add):
             log.info(f"animation end OK {element=}")
             if add:
@@ -362,7 +340,7 @@
                 if isinstance(remove, str):
                     remove = [remove]
                 element.classList.remove(*remove)
-            element.unbind('animationend', handler)
+            element.unbind("animationend", handler)

         return handler

@@ -374,21 +352,16 @@
             # Hide contacts with fade-out animation and bring up the call box
             self.search_container_elt.classList.add("fade-out-y")
             self.search_container_elt.bind(
-                'animationend',
+                "animationend",
                 self._handle_animation_end(
-                    self.search_container_elt,
-                    remove="fade-out-y",
-                    add="is-hidden"
-                )
+                    self.search_container_elt, remove="fade-out-y", add="is-hidden"
+                ),
             )
             self.call_container_elt.classList.remove("is-hidden")
             self.call_container_elt.classList.add("slide-in")
             self.call_container_elt.bind(
-                'animationend',
-                self._handle_animation_end(
-                    self.call_container_elt,
-                    remove="slide-in"
-                )
+                "animationend",
+                self._handle_animation_end(self.call_container_elt, remove="slide-in"),
             )
             self.mode = mode
         elif mode == "search":
@@ -396,26 +369,26 @@
             self.search_container_elt.classList.add("fade-out-y", "animation-reverse")
             self.search_container_elt.classList.remove("is-hidden")
             self.search_container_elt.bind(
-                'animationend',
+                "animationend",
                 self._handle_animation_end(
                     self.search_container_elt,
                     remove=["fade-out-y", "animation-reverse"],
-                )
+                ),
             )
             self.call_container_elt.classList.add("slide-in", "animation-reverse")
             self.call_container_elt.bind(
-                'animationend',
+                "animationend",
                 self._handle_animation_end(
                     self.call_container_elt,
                     remove=["slide-in", "animation-reverse"],
-                    add="is-hidden"
-                )
+                    add="is-hidden",
+                ),
             )
             self.mode = mode
         else:
             log.error(f"Internal Error: Unknown call mode: {mode}")

-    def toggle_fullscreen(self, fullscreen: bool|None = None):
+    def toggle_fullscreen(self, fullscreen: bool | None = None):
         """Toggle fullscreen mode for video elements.

         @param fullscreen: if set, determine the fullscreen state; otherwise,
@@ -439,8 +412,7 @@

         except Exception as e:
             dialog.notification.show(
-                f"An error occurred while toggling fullscreen: {e}",
-                level="error"
+                f"An error occurred while toggling fullscreen: {e}", level="error"
             )

     def toggle_audio_mute(self, evt):
@@ -448,14 +420,14 @@
         btn_elt = evt.currentTarget
         if is_muted:
             btn_elt.classList.remove("is-success")
-            btn_elt.classList.add("muted", "is-warning")
+            btn_elt.classList.add(INACTIVE_CLASS, MUTED_CLASS, "is-warning")
             dialog.notification.show(
                 f"audio is now muted",
                 level="info",
                 delay=2,
            )
         else:
-            btn_elt.classList.remove("muted", "is-warning")
+            btn_elt.classList.remove(INACTIVE_CLASS, MUTED_CLASS, "is-warning")
             btn_elt.classList.add("is-success")

     def toggle_video_mute(self, evt):
@@ -463,16 +435,29 @@
         btn_elt = evt.currentTarget
         if is_muted:
             btn_elt.classList.remove("is-success")
-            btn_elt.classList.add("muted", "is-warning")
+            btn_elt.classList.add(INACTIVE_CLASS, MUTED_CLASS, "is-warning")
             dialog.notification.show(
                 f"video is now muted",
                 level="info",
                 delay=2,
            )
         else:
-            btn_elt.classList.remove("muted", "is-warning")
+            btn_elt.classList.remove(INACTIVE_CLASS, MUTED_CLASS, "is-warning")
             btn_elt.classList.add("is-success")

+    def toggle_screen_sharing(self, evt):
+        aio.run(self.webrtc.toggle_screen_sharing())
+
+    def on_sharing_screen(self, sharing: bool) -> None:
+        """Called when screen sharing state changes"""
+        share_desktop_btn_elt = self.share_desktop_btn_elt
+        if sharing:
+            share_desktop_btn_elt.classList.add("is-danger")
+            share_desktop_btn_elt.classList.remove(INACTIVE_CLASS, SCREEN_OFF_CLASS)
+        else:
+            share_desktop_btn_elt.classList.remove("is-danger")
+            share_desktop_btn_elt.classList.add(INACTIVE_CLASS, SCREEN_OFF_CLASS)
+
     def _on_entity_click(self, item: dict) -> None:
         aio.run(self.on_entity_click(item))
--- a/libervia/web/pages/calls/_browser/webrtc.py	Wed Aug 09 00:48:21 2023 +0200
+++ b/libervia/web/pages/calls/_browser/webrtc.py	Mon Aug 14 16:49:02 2023 +0200
@@ -3,8 +3,10 @@

 from bridge import AsyncBridge as Bridge
 from browser import aio, console as log, document, timer, window
+import dialog
 import errors
-import jid
+from javascript import JSObject
+import jid

 log.warning = log.warn
 profile = window.profile or ""
@@ -13,15 +15,27 @@


 class WebRTC:
-
     def __init__(self):
         self.reset_instance()
         bridge.register_signal("ice_candidates_new", self._on_ice_candidates_new)
         self.is_audio_muted = None
         self.is_video_muted = None
+        self._is_sharing_screen = False
+        self.screen_sharing_cb = None
         self.local_video_elt = document["local_video"]
         self.remote_video_elt = document["remote_video"]

+    @property
+    def is_sharing_screen(self) -> bool:
+        return self._is_sharing_screen
+
+    @is_sharing_screen.setter
+    def is_sharing_screen(self, sharing: bool) -> None:
+        if sharing != self._is_sharing_screen:
+            self._is_sharing_screen = sharing
+            if self.screen_sharing_cb is not None:
+                self.screen_sharing_cb(sharing)
+
     def reset_instance(self):
         """Inits or resets the instance variables to their default state."""
         self._peer_connection = None
@@ -47,7 +61,7 @@
     @media_types.setter
     def media_types(self, new_media_types: dict) -> None:
         self._media_types = new_media_types
-        self._media_types_inv = {v:k for k,v in new_media_types.items()}
+        self._media_types_inv = {v: k for k, v in new_media_types.items()}

     @property
     def media_types_inv(self) -> dict:
@@ -88,10 +102,7 @@
         fingerprint_line = re.search(r"a=fingerprint:(\S+)\s+(\S+)", sdp)
         if fingerprint_line:
             algorithm, fingerprint = fingerprint_line.groups()
-            fingerprint_data = {
-                "hash": algorithm,
-                "fingerprint": fingerprint
-            }
+            fingerprint_data = {"hash": algorithm, "fingerprint": fingerprint}

         setup_line = re.search(r"a=setup:(\S+)", sdp)
         if setup_line:
@@ -144,11 +155,10 @@
             "{address} {port} typ {type}"
         )

-        if ((parsed_candidate.get('rel_addr')
-             and parsed_candidate.get('rel_port'))):
+        if parsed_candidate.get("rel_addr") and parsed_candidate.get("rel_port"):
             base_format += " raddr {rel_addr} rport {rel_port}"

-        if parsed_candidate.get('generation'):
+        if parsed_candidate.get("generation"):
             base_format += " generation {generation}"

         return base_format.format(**parsed_candidate)
@@ -187,7 +197,7 @@

         for line in sdp_lines:
             if line.startswith("m="):
-                media_types[mline_index] = line[2:line.find(" ")]
+                media_types[mline_index] = line[2 : line.find(" ")]
                 mline_index += 1

         self.media_types = media_types
@@ -218,9 +228,9 @@
             if server["type"] == "stun":
                 ice_server["urls"] = f"stun:{server['host']}:{server['port']}"
             elif server["type"] == "turn":
-                ice_server["urls"] = (
-                    f"turn:{server['host']}:{server['port']}?transport={server['transport']}"
-                )
+                ice_server[
+                    "urls"
+                ] = f"turn:{server['host']}:{server['port']}?transport={server['transport']}"
                 ice_server["username"] = server["username"]
                 ice_server["credential"] = server["password"]
             ice_servers.append(ice_server)
@@ -231,28 +241,102 @@
         peer_connection.addEventListener("track", self.on_track)
         peer_connection.addEventListener("negotiationneeded", self.on_negotiation_needed)
         peer_connection.addEventListener("icecandidate", self.on_ice_candidate)
-        peer_connection.addEventListener("icegatheringstatechange", self.on_ice_gathering_state_change)
+        peer_connection.addEventListener(
+            "icegatheringstatechange", self.on_ice_gathering_state_change
+        )
         self._peer_connection = peer_connection
         window.pc = self._peer_connection

-    async def _get_user_media(
-        self,
-        audio: bool = True,
-        video: bool = True
-    ):
-        """Gets user media
+    async def _get_user_media(self, audio: bool = True, video: bool = True) -> None:
+        """
+        Gets user media (camera and microphone).

-        @param audio: True if an audio flux is required
-        @param video: True if a video flux is required
+        @param audio: True if an audio flux is required.
+        @param video: True if a video flux is required.
         """
-        media_constraints = {'audio': audio, 'video': video}
+        media_constraints = {"audio": audio, "video": video}
         local_stream = await window.navigator.mediaDevices.getUserMedia(media_constraints)
+
+        if not local_stream:
+            log.error("Failed to get the media stream.")
+            return
+
         self.local_video_elt.srcObject = local_stream
+
         for track in local_stream.getTracks():
             self._peer_connection.addTrack(track)

+    async def _replace_user_video(
+        self,
+        screen: bool = False,
+    ) -> JSObject | None:
+        """Replaces the user video track with either a camera or desktop sharing track.
+
+        @param screen: True if desktop sharing is required. False will use the camera.
+        @return: The local media stream or None if failed.
+        """
+        if screen:
+            media_constraints = {"video": {"cursor": "always"}}
+            new_stream = await window.navigator.mediaDevices.getDisplayMedia(
+                media_constraints
+            )
+        else:
+            if self.local_video_elt.srcObject:
+                for track in self.local_video_elt.srcObject.getTracks():
+                    if track.kind == "video":
+                        track.stop()
+            media_constraints = {"video": True}
+            new_stream = await window.navigator.mediaDevices.getUserMedia(
+                media_constraints
+            )
+
+        if not new_stream:
+            log.error("Failed to get the media stream.")
+            return None
+
+        new_video_tracks = [
+            track for track in new_stream.getTracks() if track.kind == "video"
+        ]
+
+        if not new_video_tracks:
+            log.error("Failed to retrieve the video track from the new stream.")
+            return None
+
+        # Retrieve the current local stream's video track.
+        local_stream = self.local_video_elt.srcObject
+        if local_stream:
+            local_video_tracks = [
+                track for track in local_stream.getTracks() if track.kind == "video"
+            ]
+            if local_video_tracks:
+                # Remove the old video track and add the new one to the local stream.
+                local_stream.removeTrack(local_video_tracks[0])
+                local_stream.addTrack(new_video_tracks[0])
+
+        video_sender = next(
+            (
+                sender
+                for sender in self._peer_connection.getSenders()
+                if sender.track and sender.track.kind == "video"
+            ),
+            None,
+        )
+        if video_sender:
+            await video_sender.replaceTrack(new_video_tracks[0])
+
+        if screen:
+            # For screen sharing, we track the end event to properly stop the sharing when
+            # the user clicks on the browser's stop sharing dialog.
+            def on_track_ended(event):
+                aio.run(self.toggle_screen_sharing())
+
+            new_video_tracks[0].bind("ended", on_track_ended)
+
+        self.is_sharing_screen = screen
+
+        return local_stream
+
     async def _gather_ice_candidates(self, is_initiator: bool, remote_candidates=None):
         """Get ICE candidates and wait to have them all before returning them
@@ -260,14 +344,16 @@
         @param is_initiator: Boolean indicating if the user is the initiator of the connection
         @param remote_candidates: Remote ICE candidates, if any
         """
         if self._peer_connection is None:
-            raise Exception("The peer connection must be created before gathering ICE candidates!")
+            raise Exception(
+                "The peer connection must be created before gathering ICE candidates!"
+            )

         self.media_candidates.clear()
         gather_timeout = timer.set_timeout(
             lambda: self.candidates_gathered.set_exception(
                 errors.TimeoutError("ICE gathering time out")
             ),
-            GATHER_TIMEOUT
+            GATHER_TIMEOUT,
         )

         if is_initiator:
@@ -298,10 +384,7 @@
         @param sdp: Session Description Protocol data
         @param profile: Profile associated
         """
-        await self._peer_connection.setRemoteDescription({
-            "type": "answer",
-            "sdp": sdp
-        })
+        await self._peer_connection.setRemoteDescription({"type": "answer", "sdp": sdp})
         await self.on_ice_candidates_new(self.candidates_buffer)
         self.candidates_buffer.clear()
@@ -313,9 +396,7 @@
         @param profile: Profile associated with the action
         """
         if sid != self.sid:
-            log.debug(
-                f"ignoring peer ice candidates for {sid=} ({self.sid=})."
-            )
+            log.debug(f"ignoring peer ice candidates for {sid=} ({self.sid=}).")
             return
         candidates = json.loads(candidates_s)
         aio.run(self.on_ice_candidates_new(candidates))
@@ -344,11 +425,9 @@
                 except Exception as e:
                     log.warning(e)
                     continue
-                ice_candidate = window.RTCIceCandidate.new({
-                    "candidate": candidate_sdp,
-                    "sdpMLineIndex": sdp_mline_index
-                }
-                )
+                ice_candidate = window.RTCIceCandidate.new(
+                    {"candidate": candidate_sdp, "sdpMLineIndex": sdp_mline_index}
+                )
                 await self._peer_connection.addIceCandidate(ice_candidate)

     def on_track(self, event):
@@ -373,15 +452,12 @@
         """We respond to the call"""
         log.debug("answering call")
         if sid != self.sid:
-            raise Exception(
-                f"Internal Error: unexpected sid: {sid=} {self.sid=}"
-            )
+            raise Exception(f"Internal Error: unexpected sid: {sid=} {self.sid=}")
         await self._create_peer_connection()
-        await self._peer_connection.setRemoteDescription({
-            "type": "offer",
-            "sdp": offer_sdp
-        })
+        await self._peer_connection.setRemoteDescription(
+            {"type": "offer", "sdp": offer_sdp}
+        )
         await self.on_ice_candidates_new(self.candidates_buffer)
         self.candidates_buffer.clear()
         await self._get_user_media()
@@ -393,10 +469,7 @@
         await bridge.call_answer_sdp(sid, self._peer_connection.localDescription.sdp)

     async def make_call(
-        self,
-        callee_jid: jid.JID,
-        audio: bool = True,
-        video: bool = True
+        self, callee_jid: jid.JID, audio: bool = True, video: bool = True
     ) -> None:
         """Start a WebRTC call
@@ -407,14 +480,9 @@
         await self._get_user_media(audio, video)
         await self._gather_ice_candidates(True)

-        call_data = {
-            "sdp": self._peer_connection.localDescription.sdp
-        }
+        call_data = {"sdp": self._peer_connection.localDescription.sdp}
         log.info(f"calling {callee_jid!r}")
-        self.sid = await bridge.call_start(
-            str(callee_jid),
-            json.dumps(call_data)
-        )
+        self.sid = await bridge.call_start(str(callee_jid), json.dumps(call_data))
         log.debug(f"Call SID: {self.sid}")

     async def end_call(self) -> None:
@@ -423,9 +491,15 @@
             log.debug("There is currently no call to end.")
         else:
             self._peer_connection.removeEventListener("track", self.on_track)
-            self._peer_connection.removeEventListener("negotiationneeded", self.on_negotiation_needed)
-            self._peer_connection.removeEventListener("icecandidate", self.on_ice_candidate)
-            self._peer_connection.removeEventListener("icegatheringstatechange", self.on_ice_gathering_state_change)
+            self._peer_connection.removeEventListener(
+                "negotiationneeded", self.on_negotiation_needed
+            )
+            self._peer_connection.removeEventListener(
+                "icecandidate", self.on_ice_candidate
+            )
+            self._peer_connection.removeEventListener(
+                "icegatheringstatechange", self.on_ice_gathering_state_change
+            )

         # Base64 encoded 1x1 black pixel image
         # this is a trick to reset the image displayed, so we don't see last image of
@@ -462,8 +536,13 @@
         is_muted_attr = f"is_{media_type}_muted"

         if local_video.srcObject:
-            track_getter = getattr(local_video.srcObject, f"get{media_type.capitalize()}Tracks")
+            log.debug(f"{local_video.srcObject=}")
+            track_getter = getattr(
+                local_video.srcObject, f"get{media_type.capitalize()}Tracks"
+            )
+            log.debug("track go")
             for track in track_getter():
+                log.debug(f"{track=}")
                 track.enabled = not track.enabled
                 setattr(self, is_muted_attr, not track.enabled)
@@ -487,3 +566,11 @@
     def toggle_video_mute(self) -> bool:
         """Toggle mute/unmute for video tracks."""
         return self.toggle_media_mute("video")
+
+    async def toggle_screen_sharing(self):
+        log.debug(f"toggle_screen_sharing {self._is_sharing_screen=}")
+
+        if self._is_sharing_screen:
+            await self._replace_user_video(screen=False)
+        else:
+            await self._replace_user_video(screen=True)
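For reference, the UI layer consumes the new WebRTC hook roughly as in the following condensed usage sketch, based on the __init__.py changes above; the standalone layout, the lambda callback, and the `from webrtc import WebRTC` import path are illustrative, not additional patch content.

```python
# Condensed usage sketch of the new screen sharing hook: the button is shown
# only when getDisplayMedia is available, a click toggles sharing, and
# screen_sharing_cb reports the resulting state back to the UI.
from browser import aio, console as log, document, window

from webrtc import WebRTC  # module patched above; import path assumed

webrtc = WebRTC()
webrtc.screen_sharing_cb = lambda sharing: log.info(f"screen sharing: {sharing}")

share_btn = document["share_desktop_btn"]
if hasattr(window.navigator.mediaDevices, "getDisplayMedia"):
    share_btn.bind("click", lambda __: aio.run(webrtc.toggle_screen_sharing()))
else:
    # the platform cannot share the screen: keep the button hidden
    share_btn.classList.add("is-hidden")
```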