Skip to content

Commit

Permalink
feat(client): speaking while muted in React Native using temporary pe…
Browse files Browse the repository at this point in the history
…er connection (#1207)
  • Loading branch information
khushal87 authored Dec 5, 2023
1 parent b4e8848 commit 9093006
Show file tree
Hide file tree
Showing 10 changed files with 332 additions and 24 deletions.
34 changes: 24 additions & 10 deletions packages/client/src/devices/MicrophoneManager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,11 @@ import { createSoundDetector } from '../helpers/sound-detector';
import { isReactNative } from '../helpers/platforms';
import { OwnCapability } from '../gen/coordinator';
import { CallingState } from '../store';
import { RNSpeechDetector } from '../helpers/RNSpeechDetector';

export class MicrophoneManager extends InputMediaDeviceManager<MicrophoneManagerState> {
private soundDetectorCleanup?: Function;
private rnSpeechDetector: RNSpeechDetector | undefined;

constructor(call: Call) {
super(call, new MicrophoneManagerState(), TrackType.AUDIO);
Expand Down Expand Up @@ -58,21 +60,33 @@ export class MicrophoneManager extends InputMediaDeviceManager<MicrophoneManager
}

/**
 * Starts the speaking-while-muted detection for the current microphone.
 *
 * Any previously running detection is stopped first so only one detector
 * is ever active. On React Native the Web Audio API is unavailable, so a
 * dedicated detector backed by a temporary loopback peer connection is
 * used; on the web a separate (non-published) audio stream is analyzed.
 *
 * @param deviceId the microphone device to observe (web only);
 *                 defaults to the currently selected device.
 */
private async startSpeakingWhileMutedDetection(deviceId?: string) {
  await this.stopSpeakingWhileMutedDetection();
  if (isReactNative()) {
    this.rnSpeechDetector = new RNSpeechDetector();
    await this.rnSpeechDetector.start();
    const unsubscribe = this.rnSpeechDetector.onSpeakingDetectedStateChange(
      (event) => {
        this.state.setSpeakingWhileMuted(event.isSoundDetected);
      },
    );
    this.soundDetectorCleanup = () => {
      unsubscribe();
      this.rnSpeechDetector?.stop();
      this.rnSpeechDetector = undefined;
    };
  } else {
    // Need to start a new stream that's not connected to publisher
    const stream = await this.getStream({
      deviceId,
    });
    this.soundDetectorCleanup = createSoundDetector(stream, (event) => {
      this.state.setSpeakingWhileMuted(event.isSoundDetected);
    });
  }
}

private async stopSpeakingWhileMutedDetection() {
if (isReactNative() || !this.soundDetectorCleanup) {
if (!this.soundDetectorCleanup) {
return;
}
this.state.setSpeakingWhileMuted(false);
Expand Down
126 changes: 126 additions & 0 deletions packages/client/src/devices/__tests__/MicrophoneManagerRN.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { MicrophoneManager } from '../MicrophoneManager';
import { Call } from '../../Call';
import { StreamClient } from '../../coordinator/connection/client';
import { StreamVideoWriteableStateStore } from '../../store';
import { mockAudioDevices, mockAudioStream, mockCall } from './mocks';
import { of } from 'rxjs';
import '../../rtc/__tests__/mocks/webrtc.mocks';
import { OwnCapability } from '../../gen/coordinator';

// Captures the callback that MicrophoneManager registers with the mocked
// RNSpeechDetector, so individual tests can emit fake sound-detection events.
let handler;

// Force the platform check so MicrophoneManager takes its React Native code path.
vi.mock('../../helpers/platforms.ts', () => {
  return {
    isReactNative: vi.fn(() => true),
  };
});

// Stub out the device helpers: device enumeration and audio-stream acquisition
// resolve to in-memory mocks instead of touching real hardware.
vi.mock('../devices.ts', () => {
  console.log('MOCKING devices API');
  return {
    disposeOfMediaStream: vi.fn(),
    getAudioDevices: vi.fn(() => {
      return of(mockAudioDevices);
    }),
    getAudioStream: vi.fn(() => Promise.resolve(mockAudioStream())),
    deviceIds$: {},
  };
});

// Replace Call with the lightweight mock from ./mocks.
vi.mock('../../Call.ts', () => {
  console.log('MOCKING Call');
  return {
    Call: vi.fn(() => mockCall()),
  };
});

// Mock the RN speech detector. `onSpeakingDetectedStateChange` stashes the
// registered callback into `handler` (see above) and returns a mocked
// unsubscribe function.
vi.mock('../../helpers/RNSpeechDetector.ts', () => {
  console.log('MOCKING RNSpeechDetector');
  return {
    RNSpeechDetector: vi.fn().mockImplementation(() => ({
      start: vi.fn(),
      stop: vi.fn(),
      onSpeakingDetectedStateChange: vi.fn((callback) => {
        handler = callback;
        return vi.fn();
      }),
    })),
  };
});

describe('MicrophoneManager React Native', () => {
  let manager: MicrophoneManager;
  beforeEach(() => {
    manager = new MicrophoneManager(
      new Call({
        id: '',
        type: '',
        streamClient: new StreamClient('abc123'),
        clientStore: new StreamVideoWriteableStateStore(),
      }),
    );
  });

  it(`should start sound detection if mic is disabled`, async () => {
    await manager.enable();
    // @ts-expect-error - spying on a private method
    vi.spyOn(manager, 'startSpeakingWhileMutedDetection');
    await manager.disable();

    expect(manager['startSpeakingWhileMutedDetection']).toHaveBeenCalled();
    expect(manager['rnSpeechDetector']?.start).toHaveBeenCalled();
  });

  it(`should stop sound detection if mic is enabled`, async () => {
    // simulate a previously running detection
    manager.state.setSpeakingWhileMuted(true);
    manager['soundDetectorCleanup'] = () => {};

    await manager.enable();

    expect(manager.state.speakingWhileMuted).toBe(false);
  });

  it('should update speaking while muted state', async () => {
    await manager['startSpeakingWhileMutedDetection']();

    expect(manager.state.speakingWhileMuted).toBe(false);

    // drive the mocked detector via the captured callback
    handler!({ isSoundDetected: true, audioLevel: 2 });

    expect(manager.state.speakingWhileMuted).toBe(true);

    handler!({ isSoundDetected: false, audioLevel: 0 });

    expect(manager.state.speakingWhileMuted).toBe(false);
  });

  it('should stop speaking while muted notifications if user loses permission to send audio', async () => {
    await manager.enable();
    await manager.disable();

    // @ts-expect-error - spying on a private method
    vi.spyOn(manager, 'stopSpeakingWhileMutedDetection');
    manager['call'].state.setOwnCapabilities([]);

    expect(manager['stopSpeakingWhileMutedDetection']).toHaveBeenCalled();
  });

  it('should start speaking while muted notifications if user gains permission to send audio', async () => {
    await manager.enable();
    await manager.disable();

    // drop the capability first so that detection is stopped
    manager['call'].state.setOwnCapabilities([]);

    // @ts-expect-error - spying on a private method
    vi.spyOn(manager, 'startSpeakingWhileMutedDetection');
    manager['call'].state.setOwnCapabilities([OwnCapability.SEND_AUDIO]);

    // regaining SEND_AUDIO while muted must restart the detection
    expect(manager['startSpeakingWhileMutedDetection']).toHaveBeenCalled();
  });

  afterEach(() => {
    vi.clearAllMocks();
    vi.resetModules();
  });
});
112 changes: 112 additions & 0 deletions packages/client/src/helpers/RNSpeechDetector.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
import { BaseStats } from '../stats/types';
import { SoundStateChangeHandler } from './sound-detector';

/**
 * Collects every entry of the given stats report into a plain array.
 *
 * @param report the report to flatten.
 */
const flatten = (report: RTCStatsReport) => {
  const entries: RTCStats[] = [];
  for (const entry of report.values()) {
    entries.push(entry);
  }
  return entries;
};

const AUDIO_LEVEL_THRESHOLD = 0.2;

export class RNSpeechDetector {
  private pc1 = new RTCPeerConnection({});
  private pc2 = new RTCPeerConnection({});
  // portable timer type (works with both DOM and Node typings)
  private intervalId: ReturnType<typeof setInterval> | undefined;
  // the captured microphone stream; kept so `stop()` can release the device
  private mediaStream: MediaStream | undefined;

  /**
   * Starts the speech detection by negotiating a loopback connection
   * between two local peer connections; audio levels are later read from
   * the sending side's (`pc1`) WebRTC stats.
   *
   * Errors are logged and swallowed so that a failure to start the
   * detector never breaks the surrounding call flow.
   */
  public async start() {
    try {
      const audioStream = await navigator.mediaDevices.getUserMedia({
        audio: true,
      });
      this.mediaStream = audioStream;

      // exchange ICE candidates between the two loopback peers
      this.pc1.addEventListener('icecandidate', async (e) => {
        await this.pc2.addIceCandidate(
          e.candidate as RTCIceCandidateInit | undefined,
        );
      });
      this.pc2.addEventListener('icecandidate', async (e) => {
        await this.pc1.addIceCandidate(
          e.candidate as RTCIceCandidateInit | undefined,
        );
      });

      audioStream
        .getTracks()
        .forEach((track) => this.pc1.addTrack(track, audioStream));
      const offer = await this.pc1.createOffer({});
      await this.pc2.setRemoteDescription(offer);
      await this.pc1.setLocalDescription(offer);
      const answer = await this.pc2.createAnswer();
      await this.pc1.setRemoteDescription(answer);
      await this.pc2.setLocalDescription(answer);
      const audioTracks = audioStream.getAudioTracks();
      // We need to mute the audio track for this temporary stream, or else you will hear yourself twice while in the call.
      audioTracks.forEach((track) => (track.enabled = false));
    } catch (error) {
      console.error(
        'Error connecting and negotiating between PeerConnections:',
        error,
      );
    }
  }

  /**
   * Stops the speech detection and releases all allocated resources.
   */
  public stop() {
    this.pc1.close();
    this.pc2.close();
    if (this.intervalId) {
      clearInterval(this.intervalId);
      this.intervalId = undefined;
    }
    // Stop the captured microphone tracks as well - otherwise the device
    // stays open (e.g., the OS microphone indicator stays on) after the
    // detection has ended.
    this.mediaStream?.getTracks().forEach((track) => track.stop());
    this.mediaStream = undefined;
  }

  /**
   * Polls the sender's WebRTC stats once per second and reports whether
   * the captured audio level crosses `AUDIO_LEVEL_THRESHOLD`.
   *
   * @param onSoundDetectedStateChanged invoked with the detection state.
   * @returns an unsubscribe function that stops the polling.
   */
  public onSpeakingDetectedStateChange(
    onSoundDetectedStateChanged: SoundStateChangeHandler,
  ) {
    // guard against leaking a previously started polling loop
    if (this.intervalId) {
      clearInterval(this.intervalId);
    }
    this.intervalId = setInterval(async () => {
      const stats = (await this.pc1.getStats()) as RTCStatsReport;
      const report = flatten(stats);
      // Audio levels are present inside stats of type `media-source` and of kind `audio`
      const audioMediaSourceStats = report.find(
        (stat) =>
          stat.type === 'media-source' &&
          (stat as RTCRtpStreamStats).kind === 'audio',
      ) as BaseStats;
      if (audioMediaSourceStats) {
        const { audioLevel } = audioMediaSourceStats;
        // `audioLevel` can legitimately be 0 (silence). Checking against
        // undefined - rather than truthiness - makes sure we still emit
        // the "not speaking" state when the level drops to exactly 0,
        // instead of leaving the last reported state stuck.
        if (audioLevel !== undefined) {
          if (audioLevel >= AUDIO_LEVEL_THRESHOLD) {
            onSoundDetectedStateChanged({
              isSoundDetected: true,
              audioLevel,
            });
          } else {
            onSoundDetectedStateChanged({
              isSoundDetected: false,
              audioLevel: 0,
            });
          }
        }
      }
    }, 1000);

    return () => {
      clearInterval(this.intervalId);
      this.intervalId = undefined;
    };
  }
}
1 change: 1 addition & 0 deletions packages/client/src/stats/types.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
export type BaseStats = {
audioLevel?: number;
bytesSent?: number;
bytesReceived?: number;
codec?: string;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
---
id: speaking-while-muted
title: Speaking while muted
---

It's a UI best practice to show some visual feedback when the user is speaking while muted. You can observe this state in `call.state.speakingWhileMuted`.

### Custom Speaking while muted Component

Our speaking-while-muted notification component is based on a simple principle: it reads the `isSpeakingWhileMuted` state of the currently selected microphone. The UI is rendered only when `isSpeakingWhileMuted` is set to `true`.

This can be derived from `useMicrophoneState` hook available in `useCallStateHooks`.

![Preview of the Speaking While Muted notification component](../assets/05-ui-cookbook/15-speaking-while-muted/speaking-while-muted.png)

```tsx
import { Text } from 'react-native';
import { useCallStateHooks } from '@stream-io/video-react-native-sdk';

export const SpeakingWhileMutedNotification = () => {
  const { useMicrophoneState } = useCallStateHooks();
  const { isSpeakingWhileMuted } = useMicrophoneState();

  // Render nothing unless the user is currently speaking while muted.
  if (!isSpeakingWhileMuted) return null;
  return <Text>You are muted. Unmute to speak.</Text>;
};
```
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
3 changes: 3 additions & 0 deletions packages/react-native-sdk/jest-setup.ts
Original file line number Diff line number Diff line change
Expand Up @@ -48,3 +48,6 @@ global.navigator = {
},
product: 'ReactNative',
};

// Stub RTCPeerConnection so code paths that construct peer connections
// (e.g. RNSpeechDetector) can run inside the jest environment.
// @ts-expect-error due to dom typing incompatible with RN
global.RTCPeerConnection = jest.fn();
4 changes: 2 additions & 2 deletions sample-apps/react-native/dogfood/ios/Podfile.lock
Original file line number Diff line number Diff line change
Expand Up @@ -524,7 +524,7 @@ PODS:
- stream-react-native-webrtc (118.0.1):
- JitsiWebRTC (~> 118.0.0)
- React-Core
- stream-video-react-native (0.2.11):
- stream-video-react-native (0.3.0):
- React-Core
- stream-react-native-webrtc
- TOCropViewController (2.6.1)
Expand Down Expand Up @@ -835,7 +835,7 @@ SPEC CHECKSUMS:
RNVoipPushNotification: 543e18f83089134a35e7f1d2eba4c8b1f7776b08
SocketRocket: fccef3f9c5cedea1353a9ef6ada904fde10d6608
stream-react-native-webrtc: 31fe9ee69d5b4fc191380a78efa292377494b7ac
stream-video-react-native: 87db9cd0f19ea8052db8ec74602f63847fe1816f
stream-video-react-native: 325f2e8d32daca923978c3ee51fed56976f29043
TOCropViewController: edfd4f25713d56905ad1e0b9f5be3fbe0f59c863
Yoga: f7decafdc5e8c125e6fa0da38a687e35238420fa
YogaKit: f782866e155069a2cca2517aafea43200b01fd5a
Expand Down
Loading

0 comments on commit 9093006

Please sign in to comment.