Mercurial > libervia-web
diff libervia/web/pages/calls/_browser/webrtc.py @ 1553:83c2a6faa2ae
browser (calls): screen sharing implementation:
- the new screen sharing button toggles the screen sharing state
- the button reflects the screen sharing state (green crossed when not sharing, red
uncrossed otherwise)
- the screen sharing stream replaces the camera one, and vice versa. No re-negotiation is
needed.
- stopping the sharing through browser's dialog is supported
- the screen sharing button is only visible if supported by the platform
rel 432
author | Goffi <goffi@goffi.org> |
---|---|
date | Mon, 14 Aug 2023 16:49:02 +0200 |
parents | e47c24204449 |
children | 410064b31dca |
line wrap: on
line diff
--- a/libervia/web/pages/calls/_browser/webrtc.py Wed Aug 09 00:48:21 2023 +0200 +++ b/libervia/web/pages/calls/_browser/webrtc.py Mon Aug 14 16:49:02 2023 +0200 @@ -3,8 +3,10 @@ from bridge import AsyncBridge as Bridge from browser import aio, console as log, document, timer, window +import dialog import errors -import jid +from javascript import JSObject +import jid log.warning = log.warn profile = window.profile or "" @@ -13,15 +15,27 @@ class WebRTC: - def __init__(self): self.reset_instance() bridge.register_signal("ice_candidates_new", self._on_ice_candidates_new) self.is_audio_muted = None self.is_video_muted = None + self._is_sharing_screen = False + self.screen_sharing_cb = None self.local_video_elt = document["local_video"] self.remote_video_elt = document["remote_video"] + @property + def is_sharing_screen(self) -> bool: + return self._is_sharing_screen + + @is_sharing_screen.setter + def is_sharing_screen(self, sharing: bool) -> None: + if sharing != self._is_sharing_screen: + self._is_sharing_screen = sharing + if self.screen_sharing_cb is not None: + self.screen_sharing_cb(sharing) + def reset_instance(self): """Inits or resets the instance variables to their default state.""" self._peer_connection = None @@ -47,7 +61,7 @@ @media_types.setter def media_types(self, new_media_types: dict) -> None: self._media_types = new_media_types - self._media_types_inv = {v:k for k,v in new_media_types.items()} + self._media_types_inv = {v: k for k, v in new_media_types.items()} @property def media_types_inv(self) -> dict: @@ -88,10 +102,7 @@ fingerprint_line = re.search(r"a=fingerprint:(\S+)\s+(\S+)", sdp) if fingerprint_line: algorithm, fingerprint = fingerprint_line.groups() - fingerprint_data = { - "hash": algorithm, - "fingerprint": fingerprint - } + fingerprint_data = {"hash": algorithm, "fingerprint": fingerprint} setup_line = re.search(r"a=setup:(\S+)", sdp) if setup_line: @@ -144,11 +155,10 @@ "{address} {port} typ {type}" ) - if 
((parsed_candidate.get('rel_addr') - and parsed_candidate.get('rel_port'))): + if parsed_candidate.get("rel_addr") and parsed_candidate.get("rel_port"): base_format += " raddr {rel_addr} rport {rel_port}" - if parsed_candidate.get('generation'): + if parsed_candidate.get("generation"): base_format += " generation {generation}" return base_format.format(**parsed_candidate) @@ -187,7 +197,7 @@ for line in sdp_lines: if line.startswith("m="): - media_types[mline_index] = line[2:line.find(" ")] + media_types[mline_index] = line[2 : line.find(" ")] mline_index += 1 self.media_types = media_types @@ -218,9 +228,9 @@ if server["type"] == "stun": ice_server["urls"] = f"stun:{server['host']}:{server['port']}" elif server["type"] == "turn": - ice_server["urls"] = ( - f"turn:{server['host']}:{server['port']}?transport={server['transport']}" - ) + ice_server[ + "urls" + ] = f"turn:{server['host']}:{server['port']}?transport={server['transport']}" ice_server["username"] = server["username"] ice_server["credential"] = server["password"] ice_servers.append(ice_server) @@ -231,28 +241,102 @@ peer_connection.addEventListener("track", self.on_track) peer_connection.addEventListener("negotiationneeded", self.on_negotiation_needed) peer_connection.addEventListener("icecandidate", self.on_ice_candidate) - peer_connection.addEventListener("icegatheringstatechange", self.on_ice_gathering_state_change) + peer_connection.addEventListener( + "icegatheringstatechange", self.on_ice_gathering_state_change + ) self._peer_connection = peer_connection window.pc = self._peer_connection - async def _get_user_media( - self, - audio: bool = True, - video: bool = True - ): - """Gets user media + async def _get_user_media(self, audio: bool = True, video: bool = True) -> None: + """ + Gets user media (camera and microphone). - @param audio: True if an audio flux is required - @param video: True if a video flux is required + @param audio: True if an audio flux is required. 
+ @param video: True if a video flux is required. """ - media_constraints = {'audio': audio, 'video': video} + media_constraints = {"audio": audio, "video": video} local_stream = await window.navigator.mediaDevices.getUserMedia(media_constraints) + + if not local_stream: + log.error("Failed to get the media stream.") + return + self.local_video_elt.srcObject = local_stream for track in local_stream.getTracks(): self._peer_connection.addTrack(track) + async def _replace_user_video( + self, + screen: bool = False, + ) -> JSObject | None: + """Replaces the user video track with either a camera or desktop sharing track. + + @param screen: True if desktop sharing is required. False will use the camera. + @return: The local media stream or None if failed. + """ + if screen: + media_constraints = {"video": {"cursor": "always"}} + new_stream = await window.navigator.mediaDevices.getDisplayMedia( + media_constraints + ) + else: + if self.local_video_elt.srcObject: + for track in self.local_video_elt.srcObject.getTracks(): + if track.kind == "video": + track.stop() + media_constraints = {"video": True} + new_stream = await window.navigator.mediaDevices.getUserMedia( + media_constraints + ) + + if not new_stream: + log.error("Failed to get the media stream.") + return None + + new_video_tracks = [ + track for track in new_stream.getTracks() if track.kind == "video" + ] + + if not new_video_tracks: + log.error("Failed to retrieve the video track from the new stream.") + return None + + # Retrieve the current local stream's video track. + local_stream = self.local_video_elt.srcObject + if local_stream: + local_video_tracks = [ + track for track in local_stream.getTracks() if track.kind == "video" + ] + if local_video_tracks: + # Remove the old video track and add the new one to the local stream. 
+ local_stream.removeTrack(local_video_tracks[0]) + local_stream.addTrack(new_video_tracks[0]) + + video_sender = next( + ( + sender + for sender in self._peer_connection.getSenders() + if sender.track and sender.track.kind == "video" + ), + None, + ) + if video_sender: + await video_sender.replaceTrack(new_video_tracks[0]) + + if screen: + # For screen sharing, we track the end event to properly stop the sharing when + # the user clicks on the browser's stop sharing dialog. + def on_track_ended(event): + aio.run(self.toggle_screen_sharing()) + + new_video_tracks[0].bind("ended", on_track_ended) + + self.is_sharing_screen = screen + + return local_stream + async def _gather_ice_candidates(self, is_initiator: bool, remote_candidates=None): """Get ICE candidates and wait to have them all before returning them @@ -260,14 +344,16 @@ @param remote_candidates: Remote ICE candidates, if any """ if self._peer_connection is None: - raise Exception("The peer connection must be created before gathering ICE candidates!") + raise Exception( + "The peer connection must be created before gathering ICE candidates!" + ) self.media_candidates.clear() gather_timeout = timer.set_timeout( lambda: self.candidates_gathered.set_exception( errors.TimeoutError("ICE gathering time out") ), - GATHER_TIMEOUT + GATHER_TIMEOUT, ) if is_initiator: @@ -298,10 +384,7 @@ @param sdp: Session Description Protocol data @param profile: Profile associated """ - await self._peer_connection.setRemoteDescription({ - "type": "answer", - "sdp": sdp - }) + await self._peer_connection.setRemoteDescription({"type": "answer", "sdp": sdp}) await self.on_ice_candidates_new(self.candidates_buffer) self.candidates_buffer.clear() @@ -313,9 +396,7 @@ @param profile: Profile associated with the action """ if sid != self.sid: - log.debug( - f"ignoring peer ice candidates for {sid=} ({self.sid=})." 
- ) + log.debug(f"ignoring peer ice candidates for {sid=} ({self.sid=}).") return candidates = json.loads(candidates_s) aio.run(self.on_ice_candidates_new(candidates)) @@ -344,11 +425,9 @@ except Exception as e: log.warning(e) continue - ice_candidate = window.RTCIceCandidate.new({ - "candidate": candidate_sdp, - "sdpMLineIndex": sdp_mline_index - } - ) + ice_candidate = window.RTCIceCandidate.new( + {"candidate": candidate_sdp, "sdpMLineIndex": sdp_mline_index} + ) await self._peer_connection.addIceCandidate(ice_candidate) def on_track(self, event): @@ -373,15 +452,12 @@ """We respond to the call""" log.debug("answering call") if sid != self.sid: - raise Exception( - f"Internal Error: unexpected sid: {sid=} {self.sid=}" - ) + raise Exception(f"Internal Error: unexpected sid: {sid=} {self.sid=}") await self._create_peer_connection() - await self._peer_connection.setRemoteDescription({ - "type": "offer", - "sdp": offer_sdp - }) + await self._peer_connection.setRemoteDescription( + {"type": "offer", "sdp": offer_sdp} + ) await self.on_ice_candidates_new(self.candidates_buffer) self.candidates_buffer.clear() await self._get_user_media() @@ -393,10 +469,7 @@ await bridge.call_answer_sdp(sid, self._peer_connection.localDescription.sdp) async def make_call( - self, - callee_jid: jid.JID, - audio: bool = True, - video: bool = True + self, callee_jid: jid.JID, audio: bool = True, video: bool = True ) -> None: """Start a WebRTC call @@ -407,14 +480,9 @@ await self._get_user_media(audio, video) await self._gather_ice_candidates(True) - call_data = { - "sdp": self._peer_connection.localDescription.sdp - } + call_data = {"sdp": self._peer_connection.localDescription.sdp} log.info(f"calling {callee_jid!r}") - self.sid = await bridge.call_start( - str(callee_jid), - json.dumps(call_data) - ) + self.sid = await bridge.call_start(str(callee_jid), json.dumps(call_data)) log.debug(f"Call SID: {self.sid}") async def end_call(self) -> None: @@ -423,9 +491,15 @@ log.debug("There is 
currently no call to end.") else: self._peer_connection.removeEventListener("track", self.on_track) - self._peer_connection.removeEventListener("negotiationneeded", self.on_negotiation_needed) - self._peer_connection.removeEventListener("icecandidate", self.on_ice_candidate) - self._peer_connection.removeEventListener("icegatheringstatechange", self.on_ice_gathering_state_change) + self._peer_connection.removeEventListener( + "negotiationneeded", self.on_negotiation_needed + ) + self._peer_connection.removeEventListener( + "icecandidate", self.on_ice_candidate + ) + self._peer_connection.removeEventListener( + "icegatheringstatechange", self.on_ice_gathering_state_change + ) # Base64 encoded 1x1 black pixel image # this is a trick to reset the image displayed, so we don't see last image of @@ -462,8 +536,13 @@ is_muted_attr = f"is_{media_type}_muted" if local_video.srcObject: - track_getter = getattr(local_video.srcObject, f"get{media_type.capitalize()}Tracks") + log.debug(f"{local_video.srcObject=}") + track_getter = getattr( + local_video.srcObject, f"get{media_type.capitalize()}Tracks" + ) + log.debug("track go") for track in track_getter(): + log.debug(f"{track=}") track.enabled = not track.enabled setattr(self, is_muted_attr, not track.enabled) @@ -487,3 +566,11 @@ def toggle_video_mute(self) -> bool: """Toggle mute/unmute for video tracks.""" return self.toggle_media_mute("video") + + async def toggle_screen_sharing(self): + log.debug(f"toggle_screen_sharing {self._is_sharing_screen=}") + + if self._is_sharing_screen: + await self._replace_user_video(screen=False) + else: + await self._replace_user_video(screen=True)