-
Notifications
You must be signed in to change notification settings - Fork 23
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat(client): speaking while muted in React Native using temporary pe…
…er connection (#1207)
- Loading branch information
Showing
10 changed files
with
332 additions
and
24 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
126 changes: 126 additions & 0 deletions
126
packages/client/src/devices/__tests__/MicrophoneManagerRN.test.ts
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,126 @@ | ||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; | ||
import { MicrophoneManager } from '../MicrophoneManager'; | ||
import { Call } from '../../Call'; | ||
import { StreamClient } from '../../coordinator/connection/client'; | ||
import { StreamVideoWriteableStateStore } from '../../store'; | ||
import { mockAudioDevices, mockAudioStream, mockCall } from './mocks'; | ||
import { of } from 'rxjs'; | ||
import '../../rtc/__tests__/mocks/webrtc.mocks'; | ||
import { OwnCapability } from '../../gen/coordinator'; | ||
|
||
let handler; | ||
|
||
vi.mock('../../helpers/platforms.ts', () => { | ||
return { | ||
isReactNative: vi.fn(() => true), | ||
}; | ||
}); | ||
|
||
vi.mock('../devices.ts', () => { | ||
console.log('MOCKING devices API'); | ||
return { | ||
disposeOfMediaStream: vi.fn(), | ||
getAudioDevices: vi.fn(() => { | ||
return of(mockAudioDevices); | ||
}), | ||
getAudioStream: vi.fn(() => Promise.resolve(mockAudioStream())), | ||
deviceIds$: {}, | ||
}; | ||
}); | ||
|
||
vi.mock('../../Call.ts', () => { | ||
console.log('MOCKING Call'); | ||
return { | ||
Call: vi.fn(() => mockCall()), | ||
}; | ||
}); | ||
|
||
vi.mock('../../helpers/RNSpeechDetector.ts', () => { | ||
console.log('MOCKING RNSpeechDetector'); | ||
return { | ||
RNSpeechDetector: vi.fn().mockImplementation(() => ({ | ||
start: vi.fn(), | ||
stop: vi.fn(), | ||
onSpeakingDetectedStateChange: vi.fn((callback) => { | ||
handler = callback; | ||
return vi.fn(); | ||
}), | ||
})), | ||
}; | ||
}); | ||
|
||
describe('MicrophoneManager React Native', () => { | ||
let manager: MicrophoneManager; | ||
beforeEach(() => { | ||
manager = new MicrophoneManager( | ||
new Call({ | ||
id: '', | ||
type: '', | ||
streamClient: new StreamClient('abc123'), | ||
clientStore: new StreamVideoWriteableStateStore(), | ||
}), | ||
); | ||
}); | ||
|
||
it(`should start sound detection if mic is disabled`, async () => { | ||
await manager.enable(); | ||
// @ts-expect-error | ||
vi.spyOn(manager, 'startSpeakingWhileMutedDetection'); | ||
await manager.disable(); | ||
|
||
expect(manager['startSpeakingWhileMutedDetection']).toHaveBeenCalled(); | ||
expect(manager['rnSpeechDetector']?.start).toHaveBeenCalled(); | ||
}); | ||
|
||
it(`should stop sound detection if mic is enabled`, async () => { | ||
manager.state.setSpeakingWhileMuted(true); | ||
manager['soundDetectorCleanup'] = () => {}; | ||
|
||
await manager.enable(); | ||
|
||
expect(manager.state.speakingWhileMuted).toBe(false); | ||
}); | ||
|
||
it('should update speaking while muted state', async () => { | ||
await manager['startSpeakingWhileMutedDetection'](); | ||
|
||
expect(manager.state.speakingWhileMuted).toBe(false); | ||
|
||
handler!({ isSoundDetected: true, audioLevel: 2 }); | ||
|
||
expect(manager.state.speakingWhileMuted).toBe(true); | ||
|
||
handler!({ isSoundDetected: false, audioLevel: 0 }); | ||
|
||
expect(manager.state.speakingWhileMuted).toBe(false); | ||
}); | ||
|
||
it('should stop speaking while muted notifications if user loses permission to send audio', async () => { | ||
await manager.enable(); | ||
await manager.disable(); | ||
|
||
// @ts-expect-error | ||
vi.spyOn(manager, 'stopSpeakingWhileMutedDetection'); | ||
manager['call'].state.setOwnCapabilities([]); | ||
|
||
expect(manager['stopSpeakingWhileMutedDetection']).toHaveBeenCalled(); | ||
}); | ||
|
||
it('should start speaking while muted notifications if user gains permission to send audio', async () => { | ||
await manager.enable(); | ||
await manager.disable(); | ||
|
||
manager['call'].state.setOwnCapabilities([]); | ||
|
||
// @ts-expect-error | ||
vi.spyOn(manager, 'stopSpeakingWhileMutedDetection'); | ||
manager['call'].state.setOwnCapabilities([OwnCapability.SEND_AUDIO]); | ||
|
||
expect(manager['stopSpeakingWhileMutedDetection']).toHaveBeenCalled(); | ||
}); | ||
|
||
afterEach(() => { | ||
vi.clearAllMocks(); | ||
vi.resetModules(); | ||
}); | ||
}); |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,112 @@ | ||
import { BaseStats } from '../stats/types'; | ||
import { SoundStateChangeHandler } from './sound-detector'; | ||
|
||
/** | ||
* Flatten the stats report into an array of stats objects. | ||
* | ||
* @param report the report to flatten. | ||
*/ | ||
const flatten = (report: RTCStatsReport) => { | ||
const stats: RTCStats[] = []; | ||
report.forEach((s) => { | ||
stats.push(s); | ||
}); | ||
return stats; | ||
}; | ||
|
||
const AUDIO_LEVEL_THRESHOLD = 0.2; | ||
|
||
export class RNSpeechDetector { | ||
private pc1 = new RTCPeerConnection({}); | ||
private pc2 = new RTCPeerConnection({}); | ||
private intervalId: NodeJS.Timer | undefined; | ||
|
||
/** | ||
* Starts the speech detection. | ||
*/ | ||
public async start() { | ||
try { | ||
const audioStream = await navigator.mediaDevices.getUserMedia({ | ||
audio: true, | ||
}); | ||
|
||
this.pc1.addEventListener('icecandidate', async (e) => { | ||
await this.pc2.addIceCandidate( | ||
e.candidate as RTCIceCandidateInit | undefined, | ||
); | ||
}); | ||
this.pc2.addEventListener('icecandidate', async (e) => { | ||
await this.pc1.addIceCandidate( | ||
e.candidate as RTCIceCandidateInit | undefined, | ||
); | ||
}); | ||
|
||
audioStream | ||
.getTracks() | ||
.forEach((track) => this.pc1.addTrack(track, audioStream)); | ||
const offer = await this.pc1.createOffer({}); | ||
await this.pc2.setRemoteDescription(offer); | ||
await this.pc1.setLocalDescription(offer); | ||
const answer = await this.pc2.createAnswer(); | ||
await this.pc1.setRemoteDescription(answer); | ||
await this.pc2.setLocalDescription(answer); | ||
const audioTracks = audioStream.getAudioTracks(); | ||
// We need to mute the audio track for this temporary stream, or else you will hear yourself twice while in the call. | ||
audioTracks.forEach((track) => (track.enabled = false)); | ||
} catch (error) { | ||
console.error( | ||
'Error connecting and negotiating between PeerConnections:', | ||
error, | ||
); | ||
} | ||
} | ||
|
||
/** | ||
* Stops the speech detection and releases all allocated resources. | ||
*/ | ||
public stop() { | ||
this.pc1.close(); | ||
this.pc2.close(); | ||
if (this.intervalId) { | ||
clearInterval(this.intervalId); | ||
} | ||
} | ||
|
||
/** | ||
* Public method that detects the audio levels and returns the status. | ||
*/ | ||
public onSpeakingDetectedStateChange( | ||
onSoundDetectedStateChanged: SoundStateChangeHandler, | ||
) { | ||
this.intervalId = setInterval(async () => { | ||
const stats = (await this.pc1.getStats()) as RTCStatsReport; | ||
const report = flatten(stats); | ||
// Audio levels are present inside stats of type `media-source` and of kind `audio` | ||
const audioMediaSourceStats = report.find( | ||
(stat) => | ||
stat.type === 'media-source' && | ||
(stat as RTCRtpStreamStats).kind === 'audio', | ||
) as BaseStats; | ||
if (audioMediaSourceStats) { | ||
const { audioLevel } = audioMediaSourceStats; | ||
if (audioLevel) { | ||
if (audioLevel >= AUDIO_LEVEL_THRESHOLD) { | ||
onSoundDetectedStateChanged({ | ||
isSoundDetected: true, | ||
audioLevel, | ||
}); | ||
} else { | ||
onSoundDetectedStateChanged({ | ||
isSoundDetected: false, | ||
audioLevel: 0, | ||
}); | ||
} | ||
} | ||
} | ||
}, 1000); | ||
|
||
return () => { | ||
clearInterval(this.intervalId); | ||
}; | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,5 @@ | ||
export type BaseStats = { | ||
audioLevel?: number; | ||
bytesSent?: number; | ||
bytesReceived?: number; | ||
codec?: string; | ||
|
26 changes: 26 additions & 0 deletions
26
...tive-sdk/docusaurus/docs/reactnative/05-ui-cookbook/15-speaking-while-muted.mdx
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
--- | ||
id: speaking-while-muted | ||
title: Speaking while muted | ||
--- | ||
|
||
It's a UI best practice to show some visual feedback when the user is speaking while muted. You can observe this state in `call.state.speakingWhileMuted`.
|
||
### Custom Speaking while muted Component | ||
|
||
Our speaking-while-muted notification component is based on the simple principle of reading the `isSpeakingWhileMuted` state of the currently selected microphone. The UI is rendered only when `isSpeakingWhileMuted` is set to `true`.
|
||
This can be derived from `useMicrophoneState` hook available in `useCallStateHooks`. | ||
|
||
![Preview of the Speaking While Muted notification component](../assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png) | ||
|
||
```tsx
import { Text } from 'react-native';
import { useCallStateHooks } from '@stream-io/video-react-native-sdk';

export const SpeakingWhileMutedNotification = () => {
  const { useMicrophoneState } = useCallStateHooks();
  const { isSpeakingWhileMuted } = useMicrophoneState();

  if (!isSpeakingWhileMuted) return null;
  return <Text>You are muted. Unmute to speak.</Text>;
};
```
Binary file added
BIN
+1.56 MB
...ctnative/assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.