diff --git a/packages/client/src/devices/MicrophoneManager.ts b/packages/client/src/devices/MicrophoneManager.ts
index 2f2caf8554..ebfe2b74b5 100644
--- a/packages/client/src/devices/MicrophoneManager.ts
+++ b/packages/client/src/devices/MicrophoneManager.ts
@@ -8,9 +8,11 @@ import { createSoundDetector } from '../helpers/sound-detector';
 import { isReactNative } from '../helpers/platforms';
 import { OwnCapability } from '../gen/coordinator';
 import { CallingState } from '../store';
+import { RNSpeechDetector } from '../helpers/RNSpeechDetector';
 
 export class MicrophoneManager extends InputMediaDeviceManager<MicrophoneManagerState> {
   private soundDetectorCleanup?: Function;
+  private rnSpeechDetector: RNSpeechDetector | undefined;
 
   constructor(call: Call) {
     super(call, new MicrophoneManagerState(), TrackType.AUDIO);
@@ -58,21 +60,33 @@ export class MicrophoneManager extends InputMediaDeviceManager<MicrophoneManagerState> {
   private async startSpeakingWhileMutedDetection(deviceId?: string) {
+    await this.stopSpeakingWhileMutedDetection();
     if (isReactNative()) {
-      return;
+      this.rnSpeechDetector = new RNSpeechDetector();
+      await this.rnSpeechDetector.start();
+      const unsubscribe = this.rnSpeechDetector.onSpeakingDetectedStateChange(
+        (event) => {
+          this.state.setSpeakingWhileMuted(event.isSoundDetected);
+        },
+      );
+      this.soundDetectorCleanup = () => {
+        unsubscribe();
+        this.rnSpeechDetector?.stop();
+        this.rnSpeechDetector = undefined;
+      };
+    } else {
+      // Need to start a new stream that's not connected to publisher
+      const stream = await this.getStream({
+        deviceId,
+      });
+      this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
+        this.state.setSpeakingWhileMuted(event.isSoundDetected);
+      });
     }
-    await this.stopSpeakingWhileMutedDetection();
-    // Need to start a new stream that's not connected to publisher
-    const stream = await this.getStream({
-      deviceId,
-    });
-    this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
-      this.state.setSpeakingWhileMuted(event.isSoundDetected);
-    });
   }
 
   private async stopSpeakingWhileMutedDetection() {
-    if (isReactNative() || !this.soundDetectorCleanup) {
+    if (!this.soundDetectorCleanup) {
      return;
    }
    this.state.setSpeakingWhileMuted(false);
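The React Native branch above follows the same contract as the existing web branch: start detection, subscribe to level changes, and stash a single cleanup function for `stopSpeakingWhileMutedDetection` to call later. A minimal sketch of driving the new `RNSpeechDetector` helper directly — the API is taken from this PR, while the import path (as used inside the package) and the `demo` wrapper are illustrative:

```ts
import { RNSpeechDetector } from '../helpers/RNSpeechDetector';

async function demoSpeechDetection() {
  const detector = new RNSpeechDetector();
  await detector.start(); // negotiates the loopback RTCPeerConnection pair

  // Subscribe to the 1s polling loop; returns an unsubscribe function.
  const unsubscribe = detector.onSpeakingDetectedStateChange((event) => {
    console.log('sound detected:', event.isSoundDetected, event.audioLevel);
  });

  // Later, e.g. once the microphone is re-enabled:
  unsubscribe(); // clears the polling interval
  detector.stop(); // closes both peer connections
}
```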
diff --git a/packages/client/src/devices/__tests__/MicrophoneManagerRN.test.ts b/packages/client/src/devices/__tests__/MicrophoneManagerRN.test.ts
new file mode 100644
index 0000000000..6a9242619c
--- /dev/null
+++ b/packages/client/src/devices/__tests__/MicrophoneManagerRN.test.ts
@@ -0,0 +1,126 @@
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { MicrophoneManager } from '../MicrophoneManager';
+import { Call } from '../../Call';
+import { StreamClient } from '../../coordinator/connection/client';
+import { StreamVideoWriteableStateStore } from '../../store';
+import { mockAudioDevices, mockAudioStream, mockCall } from './mocks';
+import { of } from 'rxjs';
+import '../../rtc/__tests__/mocks/webrtc.mocks';
+import { OwnCapability } from '../../gen/coordinator';
+
+let handler;
+
+vi.mock('../../helpers/platforms.ts', () => {
+  return {
+    isReactNative: vi.fn(() => true),
+  };
+});
+
+vi.mock('../devices.ts', () => {
+  console.log('MOCKING devices API');
+  return {
+    disposeOfMediaStream: vi.fn(),
+    getAudioDevices: vi.fn(() => {
+      return of(mockAudioDevices);
+    }),
+    getAudioStream: vi.fn(() => Promise.resolve(mockAudioStream())),
+    deviceIds$: {},
+  };
+});
+
+vi.mock('../../Call.ts', () => {
+  console.log('MOCKING Call');
+  return {
+    Call: vi.fn(() => mockCall()),
+  };
+});
+
+vi.mock('../../helpers/RNSpeechDetector.ts', () => {
+  console.log('MOCKING RNSpeechDetector');
+  return {
+    RNSpeechDetector: vi.fn().mockImplementation(() => ({
+      start: vi.fn(),
+      stop: vi.fn(),
+      onSpeakingDetectedStateChange: vi.fn((callback) => {
+        handler = callback;
+        return vi.fn();
+      }),
+    })),
+  };
+});
+
+describe('MicrophoneManager React Native', () => {
+  let manager: MicrophoneManager;
+
+  beforeEach(() => {
+    manager = new MicrophoneManager(
+      new Call({
+        id: '',
+        type: '',
+        streamClient: new StreamClient('abc123'),
+        clientStore: new StreamVideoWriteableStateStore(),
+      }),
+    );
+  });
+
+  it(`should start sound detection if mic is disabled`, async () => {
+    await manager.enable();
+    // @ts-expect-error
+    vi.spyOn(manager, 'startSpeakingWhileMutedDetection');
+    await manager.disable();
+
+    expect(manager['startSpeakingWhileMutedDetection']).toHaveBeenCalled();
+    expect(manager['rnSpeechDetector']?.start).toHaveBeenCalled();
+  });
+
+  it(`should stop sound detection if mic is enabled`, async () => {
+    manager.state.setSpeakingWhileMuted(true);
+    manager['soundDetectorCleanup'] = () => {};
+
+    await manager.enable();
+
+    expect(manager.state.speakingWhileMuted).toBe(false);
+  });
+
+  it('should update speaking while muted state', async () => {
+    await manager['startSpeakingWhileMutedDetection']();
+
+    expect(manager.state.speakingWhileMuted).toBe(false);
+
+    handler!({ isSoundDetected: true, audioLevel: 2 });
+
+    expect(manager.state.speakingWhileMuted).toBe(true);
+
+    handler!({ isSoundDetected: false, audioLevel: 0 });
+
+    expect(manager.state.speakingWhileMuted).toBe(false);
+  });
+
+  it('should stop speaking while muted notifications if user loses permission to send audio', async () => {
+    await manager.enable();
+    await manager.disable();
+
+    // @ts-expect-error
+    vi.spyOn(manager, 'stopSpeakingWhileMutedDetection');
+    manager['call'].state.setOwnCapabilities([]);
+
+    expect(manager['stopSpeakingWhileMutedDetection']).toHaveBeenCalled();
+  });
+
+  it('should start speaking while muted notifications if user gains permission to send audio', async () => {
+    await manager.enable();
+    await manager.disable();
+
+    manager['call'].state.setOwnCapabilities([]);
+
+    // @ts-expect-error
+    vi.spyOn(manager, 'startSpeakingWhileMutedDetection');
+    manager['call'].state.setOwnCapabilities([OwnCapability.SEND_AUDIO]);
+
+    expect(manager['startSpeakingWhileMutedDetection']).toHaveBeenCalled();
+  });
+
+  afterEach(() => {
+    vi.clearAllMocks();
+    vi.resetModules();
+  });
+});
diff --git a/packages/client/src/helpers/RNSpeechDetector.ts b/packages/client/src/helpers/RNSpeechDetector.ts
new file mode 100644
index 0000000000..75b20a71af
--- /dev/null
+++ b/packages/client/src/helpers/RNSpeechDetector.ts
@@ -0,0 +1,112 @@
+import { BaseStats } from '../stats/types';
+import { SoundStateChangeHandler } from './sound-detector';
+
+/**
+ * Flatten the stats report into an array of stats objects.
+ *
+ * @param report the report to flatten.
+ */
+const flatten = (report: RTCStatsReport) => {
+  const stats: RTCStats[] = [];
+  report.forEach((s) => {
+    stats.push(s);
+  });
+  return stats;
+};
+
+const AUDIO_LEVEL_THRESHOLD = 0.2;
+
+export class RNSpeechDetector {
+  private pc1 = new RTCPeerConnection({});
+  private pc2 = new RTCPeerConnection({});
+  private intervalId: NodeJS.Timer | undefined;
+
+  /**
+   * Starts the speech detection.
+   */
+  public async start() {
+    try {
+      const audioStream = await navigator.mediaDevices.getUserMedia({
+        audio: true,
+      });
+
+      this.pc1.addEventListener('icecandidate', async (e) => {
+        await this.pc2.addIceCandidate(
+          e.candidate as RTCIceCandidateInit | undefined,
+        );
+      });
+      this.pc2.addEventListener('icecandidate', async (e) => {
+        await this.pc1.addIceCandidate(
+          e.candidate as RTCIceCandidateInit | undefined,
+        );
+      });
+
+      audioStream
+        .getTracks()
+        .forEach((track) => this.pc1.addTrack(track, audioStream));
+      const offer = await this.pc1.createOffer({});
+      await this.pc2.setRemoteDescription(offer);
+      await this.pc1.setLocalDescription(offer);
+      const answer = await this.pc2.createAnswer();
+      await this.pc1.setRemoteDescription(answer);
+      await this.pc2.setLocalDescription(answer);
+      const audioTracks = audioStream.getAudioTracks();
+      // We need to mute the audio track for this temporary stream,
+      // or else you will hear yourself twice while in the call.
+      audioTracks.forEach((track) => (track.enabled = false));
+    } catch (error) {
+      console.error(
+        'Error connecting and negotiating between PeerConnections:',
+        error,
+      );
+    }
+  }
+
+  /**
+   * Stops the speech detection and releases all allocated resources.
+   */
+  public stop() {
+    this.pc1.close();
+    this.pc2.close();
+    if (this.intervalId) {
+      clearInterval(this.intervalId);
+    }
+  }
+
+  /**
+   * Public method that detects the audio levels and reports the status.
+   */
+  public onSpeakingDetectedStateChange(
+    onSoundDetectedStateChanged: SoundStateChangeHandler,
+  ) {
+    this.intervalId = setInterval(async () => {
+      const stats = (await this.pc1.getStats()) as RTCStatsReport;
+      const report = flatten(stats);
+      // Audio levels are present in stats of type `media-source` and of kind `audio`.
+      const audioMediaSourceStats = report.find(
+        (stat) =>
+          stat.type === 'media-source' &&
+          (stat as RTCRtpStreamStats).kind === 'audio',
+      ) as BaseStats;
+      if (audioMediaSourceStats) {
+        const { audioLevel } = audioMediaSourceStats;
+        if (audioLevel) {
+          if (audioLevel >= AUDIO_LEVEL_THRESHOLD) {
+            onSoundDetectedStateChanged({
+              isSoundDetected: true,
+              audioLevel,
+            });
+          } else {
+            onSoundDetectedStateChanged({
+              isSoundDetected: false,
+              audioLevel: 0,
+            });
+          }
+        }
+      }
+    }, 1000);
+
+    return () => {
+      clearInterval(this.intervalId);
+    };
+  }
+}
diff --git a/packages/client/src/stats/types.ts b/packages/client/src/stats/types.ts
index d5562801b6..1697777cec 100644
--- a/packages/client/src/stats/types.ts
+++ b/packages/client/src/stats/types.ts
@@ -1,4 +1,5 @@
 export type BaseStats = {
+  audioLevel?: number;
   bytesSent?: number;
   bytesReceived?: number;
   codec?: string;
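Stripped of the class plumbing, the detection trick is small: once the loopback connection is negotiated, every `getStats()` report contains a `media-source` entry of kind `audio` whose `audioLevel` sits in the 0–1 range (standard WebRTC stats), and the detector simply compares it against `AUDIO_LEVEL_THRESHOLD`. A standalone sketch of that single step — the `pc` argument is assumed to be the already-negotiated sending connection (`pc1` above):

```ts
const AUDIO_LEVEL_THRESHOLD = 0.2; // same cutoff as RNSpeechDetector

async function isSpeaking(pc: RTCPeerConnection): Promise<boolean> {
  const report = await pc.getStats();
  let audioLevel = 0;
  report.forEach((stat) => {
    // 'media-source' stats describe the local capturer, not the RTP stream.
    if (stat.type === 'media-source' && (stat as any).kind === 'audio') {
      audioLevel = (stat as any).audioLevel ?? 0;
    }
  });
  return audioLevel >= AUDIO_LEVEL_THRESHOLD;
}
```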
diff --git a/packages/react-native-sdk/docusaurus/docs/reactnative/05-ui-cookbook/15-speaking-while-muted.mdx b/packages/react-native-sdk/docusaurus/docs/reactnative/05-ui-cookbook/15-speaking-while-muted.mdx
new file mode 100644
index 0000000000..6ddf619db7
--- /dev/null
+++ b/packages/react-native-sdk/docusaurus/docs/reactnative/05-ui-cookbook/15-speaking-while-muted.mdx
@@ -0,0 +1,27 @@
+---
+id: speaking-while-muted
+title: Speaking while muted
+---
+
+It's a UI best practice to show some visual feedback when the user is speaking while muted. You can observe this state through `call.microphone.state.speakingWhileMuted`.
+
+### Custom Speaking While Muted Component
+
+Our speaking-while-muted notification component is based on a simple principle: read the `isSpeakingWhileMuted` state of the currently selected microphone and render the UI only while it is `true`.
+
+This state can be derived from the `useMicrophoneState` hook available in `useCallStateHooks`.
+
+![Preview of the Speaking While Muted notification component](../assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png)
+
+```tsx
+import { useCallStateHooks } from '@stream-io/video-react-native-sdk';
+import { Text } from 'react-native';
+
+export const SpeakingWhileMutedNotification = () => {
+  const { useMicrophoneState } = useCallStateHooks();
+  const { isSpeakingWhileMuted } = useMicrophoneState();
+
+  if (!isSpeakingWhileMuted) return null;
+  return <Text>You are muted. Unmute to speak.</Text>;
+};
+```
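To round the cookbook page off: the notification only needs to be mounted somewhere inside an active call boundary so the hooks can resolve the call. A hypothetical placement — the `CallScreen` wrapper and its `call` prop are illustrative, while `StreamCall` and `CallContent` are the RN SDK's call provider and default call UI:

```tsx
import React from 'react';
import {
  StreamCall,
  CallContent,
  Call,
} from '@stream-io/video-react-native-sdk';
import { SpeakingWhileMutedNotification } from './SpeakingWhileMutedNotification';

export const CallScreen = ({ call }: { call: Call }) => (
  <StreamCall call={call}>
    <CallContent />
    <SpeakingWhileMutedNotification />
  </StreamCall>
);
```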
diff --git a/packages/react-native-sdk/docusaurus/docs/reactnative/assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png b/packages/react-native-sdk/docusaurus/docs/reactnative/assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png
new file mode 100644
index 0000000000..79d329cd9b
Binary files /dev/null and b/packages/react-native-sdk/docusaurus/docs/reactnative/assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png differ
diff --git a/packages/react-native-sdk/jest-setup.ts b/packages/react-native-sdk/jest-setup.ts
index dc563a65cf..e982e22f82 100644
--- a/packages/react-native-sdk/jest-setup.ts
+++ b/packages/react-native-sdk/jest-setup.ts
@@ -48,3 +48,6 @@ global.navigator = {
   },
   product: 'ReactNative',
 };
+
+// @ts-expect-error due to dom typing incompatible with RN
+global.RTCPeerConnection = jest.fn();
diff --git a/sample-apps/react-native/dogfood/ios/Podfile.lock b/sample-apps/react-native/dogfood/ios/Podfile.lock
index 8fc1cf24a2..de8362d427 100644
--- a/sample-apps/react-native/dogfood/ios/Podfile.lock
+++ b/sample-apps/react-native/dogfood/ios/Podfile.lock
@@ -524,7 +524,7 @@ PODS:
   - stream-react-native-webrtc (118.0.1):
     - JitsiWebRTC (~> 118.0.0)
     - React-Core
-  - stream-video-react-native (0.2.11):
+  - stream-video-react-native (0.3.0):
     - React-Core
     - stream-react-native-webrtc
   - TOCropViewController (2.6.1)
@@ -835,7 +835,7 @@ SPEC CHECKSUMS:
  RNVoipPushNotification: 543e18f83089134a35e7f1d2eba4c8b1f7776b08
  SocketRocket: fccef3f9c5cedea1353a9ef6ada904fde10d6608
  stream-react-native-webrtc: 31fe9ee69d5b4fc191380a78efa292377494b7ac
- stream-video-react-native: 87db9cd0f19ea8052db8ec74602f63847fe1816f
+ stream-video-react-native: 325f2e8d32daca923978c3ee51fed56976f29043
  TOCropViewController: edfd4f25713d56905ad1e0b9f5be3fbe0f59c863
  Yoga: f7decafdc5e8c125e6fa0da38a687e35238420fa
  YogaKit: f782866e155069a2cca2517aafea43200b01fd5a
diff --git a/sample-apps/react-native/dogfood/src/components/CallControlsComponent.tsx b/sample-apps/react-native/dogfood/src/components/CallControlsComponent.tsx
index 6e4fa227ba..1dc130c5f6 100644
--- a/sample-apps/react-native/dogfood/src/components/CallControlsComponent.tsx
+++ b/sample-apps/react-native/dogfood/src/components/CallControlsComponent.tsx
@@ -7,9 +7,10 @@ import {
   ToggleCameraFaceButton,
   ToggleVideoPublishingButton,
   ScreenShareButton,
+  useCallStateHooks,
 } from '@stream-io/video-react-native-sdk';
 import React from 'react';
-import { StyleSheet, View, ViewStyle } from 'react-native';
+import { StyleSheet, Text, View, ViewStyle } from 'react-native';
 import { appTheme } from '../theme';
 import { useSafeAreaInsets } from 'react-native-safe-area-context';
 import { Z_INDEX } from '../constants';
@@ -31,6 +32,8 @@ export const CallControlsComponent = ({
   landscape,
 }: CallControlsComponentProps) => {
   const { bottom } = useSafeAreaInsets();
+  const { useMicrophoneState } = useCallStateHooks();
+  const { isSpeakingWhileMuted } = useMicrophoneState();
   const landscapeStyles: ViewStyle = {
     flexDirection: landscape ? 'column-reverse' : 'row',
     paddingHorizontal: landscape ? 12 : 0,
@@ -39,22 +42,38 @@ export const CallControlsComponent = ({
   };
 
   return (
-    <View style={[styles.callControlsWrapper, landscapeStyles, { paddingBottom: bottom }]}>
-      <ToggleVideoPublishingButton />
-      <ToggleAudioPublishingButton />
-      <ToggleCameraFaceButton />
-      <ScreenShareButton />
-      <HangUpCallButton />
-    </View>
+    <View>
+      {isSpeakingWhileMuted && (
+        <View style={styles.speakingLabelContainer}>
+          <Text style={styles.label}>You are muted. Unmute to speak.</Text>
+        </View>
+      )}
+      <View style={[styles.callControlsWrapper, landscapeStyles, { paddingBottom: bottom }]}>
+        <ToggleVideoPublishingButton />
+        <ToggleAudioPublishingButton />
+        <ToggleCameraFaceButton />
+        <ScreenShareButton />
+        <HangUpCallButton />
+      </View>
+    </View>
   );
 };
 
 const styles = StyleSheet.create({
+  speakingLabelContainer: {
+    backgroundColor: appTheme.colors.static_overlay,
+    paddingVertical: 10,
+    width: '100%',
+  },
+  label: {
+    textAlign: 'center',
+    color: appTheme.colors.static_white,
+  },
   callControlsWrapper: {
     justifyContent: 'space-evenly',
     zIndex: Z_INDEX.IN_FRONT,
diff --git a/sample-apps/react-native/dogfood/src/theme.ts b/sample-apps/react-native/dogfood/src/theme.ts
index 7fbfe6f8d7..372e7f67ce 100644
--- a/sample-apps/react-native/dogfood/src/theme.ts
+++ b/sample-apps/react-native/dogfood/src/theme.ts
@@ -1,7 +1,14 @@
+const opacityToHex = (opacity: number) => {
+  return Math.round(opacity * 255)
+    .toString(16)
+    .padStart(2, '0');
+};
+
 export const appTheme = {
   colors: {
     static_grey: '#272A30',
     static_white: '#ffffff',
+    static_overlay: '#080707' + opacityToHex(0.85),
     primary: '#005FFF',
     light_gray: '#979797',
     light_blue: '#669FFF',
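Finally, a quick sanity check on the `opacityToHex` helper introduced in `theme.ts` (plain arithmetic, reproduced here only for illustration): 0.85 × 255 = 216.75, which rounds to 217 = 0xd9, so `static_overlay` resolves to the 8-digit color `#080707d9`.

```ts
const opacityToHex = (opacity: number) =>
  Math.round(opacity * 255)
    .toString(16)
    .padStart(2, '0');

opacityToHex(0.85); // 'd9'
'#080707' + opacityToHex(0.85); // '#080707d9' — the static_overlay value
```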