diff --git a/packages/client/src/Call.ts b/packages/client/src/Call.ts index 6943837ffd..1c31f8397e 100644 --- a/packages/client/src/Call.ts +++ b/packages/client/src/Call.ts @@ -9,11 +9,7 @@ import { Subscriber, } from './rtc'; import { muteTypeToTrackType } from './rtc/helpers/tracks'; -import { - GoAwayReason, - SdkType, - TrackType, -} from './gen/video/sfu/models/models'; +import { GoAwayReason, TrackType } from './gen/video/sfu/models/models'; import { registerEventHandlers, registerRingingCallEventHandlers, @@ -116,7 +112,7 @@ import { Logger, StreamCallEvent, } from './coordinator/connection/types'; -import { getClientDetails, getSdkInfo } from './client-details'; +import { getClientDetails } from './client-details'; import { getLogger } from './logger'; import { CameraDirection, @@ -1000,14 +996,11 @@ export class Call { this.reconnectAttempts = 0; // reset the reconnect attempts counter this.state.setCallingState(CallingState.JOINED); - // React uses a different device management for now - if (getSdkInfo()?.type !== SdkType.REACT) { - try { - await this.initCamera(); - await this.initMic(); - } catch (error) { - this.logger('warn', 'Camera and/or mic init failed during join call'); - } + try { + await this.initCamera(); + await this.initMic(); + } catch (error) { + this.logger('warn', 'Camera and/or mic init failed during join call'); } // 3. once we have the "joinResponse", and possibly reconciled the local state @@ -1318,56 +1311,6 @@ export class Call { return this.statsReporter?.stopReportingStatsFor(sessionId); }; - /** - * Sets the used audio output device (`audioOutputDeviceId` of the [`localParticipant$`](./StreamVideoClient.md/#readonlystatestore). - * - * This method only stores the selection, if you're using custom UI components, you'll have to implement the audio switching, for more information see: https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/sinkId. - * - * - * @param deviceId the selected device, `undefined` means the user wants to use the system's default audio output - * - * @deprecated use `call.speaker` instead - */ - setAudioOutputDevice = (deviceId?: string) => { - if (!this.sfuClient) return; - this.state.updateParticipant(this.sfuClient.sessionId, { - audioOutputDeviceId: deviceId, - }); - }; - - /** - * Sets the `audioDeviceId` property of the [`localParticipant$`](./StreamVideoClient.md/#readonlystatestore)). - * - * This method only stores the selection, if you want to start publishing a media stream call the [`publishAudioStream` method](#publishaudiostream) that will set `audioDeviceId` as well. - * - * - * @param deviceId the selected device, pass `undefined` to clear the device selection - * - * @deprecated use call.microphone.select - */ - setAudioDevice = (deviceId?: string) => { - if (!this.sfuClient) return; - this.state.updateParticipant(this.sfuClient.sessionId, { - audioDeviceId: deviceId, - }); - }; - - /** - * Sets the `videoDeviceId` property of the [`localParticipant$`](./StreamVideoClient.md/#readonlystatestore). - * - * This method only stores the selection, if you want to start publishing a media stream call the [`publishVideoStream` method](#publishvideostream) that will set `videoDeviceId` as well. 
- * - * @param deviceId the selected device, pass `undefined` to clear the device selection - * - * @deprecated use call.camera.select - */ - setVideoDevice = (deviceId?: string) => { - if (!this.sfuClient) return; - this.state.updateParticipant(this.sfuClient.sessionId, { - videoDeviceId: deviceId, - }); - }; - /** * Resets the last sent reaction for the user holding the given `sessionId`. This is a local action, it won't reset the reaction on the backend. * diff --git a/packages/client/src/devices/InputMediaDeviceManager.ts b/packages/client/src/devices/InputMediaDeviceManager.ts index 90c156e9fb..428f2a1b92 100644 --- a/packages/client/src/devices/InputMediaDeviceManager.ts +++ b/packages/client/src/devices/InputMediaDeviceManager.ts @@ -136,7 +136,7 @@ export abstract class InputMediaDeviceManager< } } - protected abstract getDevices(): Observable; + protected abstract getDevices(): Observable; protected abstract getStream(constraints: C): Promise; diff --git a/packages/client/src/helpers/DynascaleManager.ts b/packages/client/src/helpers/DynascaleManager.ts index 981df39fc0..d137996e5b 100644 --- a/packages/client/src/helpers/DynascaleManager.ts +++ b/packages/client/src/helpers/DynascaleManager.ts @@ -2,18 +2,12 @@ import { Call } from '../Call'; import { AudioTrackType, DebounceType, - StreamVideoLocalParticipant, StreamVideoParticipant, VideoTrackType, VisibilityState, } from '../types'; +import { TrackType, VideoDimension } from '../gen/video/sfu/models/models'; import { - SdkType, - TrackType, - VideoDimension, -} from '../gen/video/sfu/models/models'; -import { - combineLatest, distinctUntilChanged, distinctUntilKeyChanged, map, @@ -22,7 +16,6 @@ import { } from 'rxjs'; import { ViewportTracker } from './ViewportTracker'; import { getLogger } from '../logger'; -import { getSdkInfo } from '../client-details'; import { isFirefox, isSafari } from './browsers'; const DEFAULT_VIEWPORT_VISIBILITY_STATE: Record< @@ -174,7 +167,7 @@ export class DynascaleManager { (participants) => participants.find( (participant) => participant.sessionId === sessionId, - ) as StreamVideoLocalParticipant | StreamVideoParticipant, + ) as StreamVideoParticipant, ), takeWhile((participant) => !!participant), distinctUntilChanged(), @@ -339,9 +332,9 @@ export class DynascaleManager { const participant$ = this.call.state.participants$.pipe( map( (participants) => - participants.find((p) => p.sessionId === sessionId) as - | StreamVideoLocalParticipant - | StreamVideoParticipant, + participants.find( + (p) => p.sessionId === sessionId, + ) as StreamVideoParticipant, ), takeWhile((p) => !!p), distinctUntilChanged(), @@ -373,20 +366,14 @@ export class DynascaleManager { }); }); - const sinkIdSubscription = combineLatest([ - this.call.state.localParticipant$, - this.call.speaker.state.selectedDevice$, - ]).subscribe(([p, selectedDevice]) => { - const deviceId = - getSdkInfo()?.type === SdkType.REACT - ? p?.audioOutputDeviceId - : selectedDevice; - - if ('setSinkId' in audioElement && typeof deviceId === 'string') { - // @ts-expect-error setSinkId is not yet in the lib - audioElement.setSinkId(deviceId); - } - }); + const sinkIdSubscription = !('setSinkId' in audioElement) + ? 
null + : this.call.speaker.state.selectedDevice$.subscribe((deviceId) => { + if (deviceId) { + // @ts-expect-error setSinkId is not yet in the lib + audioElement.setSinkId(deviceId); + } + }); const volumeSubscription = this.call.speaker.state.volume$.subscribe( (volume) => { @@ -397,7 +384,7 @@ export class DynascaleManager { audioElement.autoplay = true; return () => { - sinkIdSubscription.unsubscribe(); + sinkIdSubscription?.unsubscribe(); volumeSubscription.unsubscribe(); updateMediaStreamSubscription.unsubscribe(); }; diff --git a/packages/client/src/helpers/__tests__/DynascaleManager.test.ts b/packages/client/src/helpers/__tests__/DynascaleManager.test.ts index e81d042fd0..d5babab36a 100644 --- a/packages/client/src/helpers/__tests__/DynascaleManager.test.ts +++ b/packages/client/src/helpers/__tests__/DynascaleManager.test.ts @@ -4,15 +4,14 @@ import '../../rtc/__tests__/mocks/webrtc.mocks'; -import { afterEach, beforeEach, describe, expect, it, Mock, vi } from 'vitest'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { DynascaleManager } from '../DynascaleManager'; import { Call } from '../../Call'; import { StreamClient } from '../../coordinator/connection/client'; import { StreamVideoWriteableStateStore } from '../../store'; import { DebounceType, VisibilityState } from '../../types'; import { noopComparator } from '../../sorting'; -import { SdkType, TrackType } from '../../gen/video/sfu/models/models'; -import { getSdkInfo } from '../../client-details'; +import { TrackType } from '../../gen/video/sfu/models/models'; vi.mock('../../client-details.ts', () => { return { @@ -154,18 +153,6 @@ describe('DynascaleManager', () => { 'different-device-id', ); - const mock = getSdkInfo as Mock; - mock.mockImplementation(() => ({ - type: SdkType.REACT, - })); - - call.state.updateParticipant('session-id-local', { - audioOutputDeviceId: 'new-device-id', - }); - - // @ts-expect-error setSinkId is not defined in types - expect(audioElement.setSinkId).toHaveBeenCalledWith('new-device-id'); - call.speaker.setVolume(0.5); expect(audioElement.volume).toBe(0.5); diff --git a/packages/client/src/rtc/Publisher.ts b/packages/client/src/rtc/Publisher.ts index 70db0dd9f4..203fd6ada2 100644 --- a/packages/client/src/rtc/Publisher.ts +++ b/packages/client/src/rtc/Publisher.ts @@ -14,10 +14,7 @@ import { OptimalVideoLayer, } from './videoLayers'; import { getPreferredCodecs } from './codecs'; -import { - trackTypeToDeviceIdKey, - trackTypeToParticipantStreamKey, -} from './helpers/tracks'; +import { trackTypeToParticipantStreamKey } from './helpers/tracks'; import { CallState } from '../store'; import { PublishOptions } from '../types'; import { isReactNative } from '../helpers/platforms'; @@ -387,14 +384,11 @@ export class Publisher { [audioOrVideoOrScreenShareStream]: undefined, })); } else { - const deviceId = track.getSettings().deviceId; - const audioOrVideoDeviceKey = trackTypeToDeviceIdKey(trackType); this.state.updateParticipant(this.sfuClient.sessionId, (p) => { return { publishedTracks: p.publishedTracks.includes(trackType) ? 
p.publishedTracks : [...p.publishedTracks, trackType], - ...(audioOrVideoDeviceKey && { [audioOrVideoDeviceKey]: deviceId }), [audioOrVideoOrScreenShareStream]: mediaStream, }; }); diff --git a/packages/client/src/rtc/__tests__/Publisher.test.ts b/packages/client/src/rtc/__tests__/Publisher.test.ts index ceb662c3e8..b7c2d1d378 100644 --- a/packages/client/src/rtc/__tests__/Publisher.test.ts +++ b/packages/client/src/rtc/__tests__/Publisher.test.ts @@ -99,7 +99,6 @@ describe('Publisher', () => { // initial publish await publisher.publishStream(mediaStream, track, TrackType.VIDEO); - expect(state.localParticipant?.videoDeviceId).toEqual('test-device-id'); expect(state.localParticipant?.publishedTracks).toContain(TrackType.VIDEO); expect(state.localParticipant?.videoStream).toEqual(mediaStream); expect(transceiver.setCodecPreferences).toHaveBeenCalled(); @@ -136,7 +135,6 @@ describe('Publisher', () => { expect.any(Function), ); expect(transceiver.sender.replaceTrack).toHaveBeenCalledWith(newTrack); - expect(state.localParticipant?.videoDeviceId).toEqual('test-device-id-2'); // stop publishing await publisher.unpublishStream(TrackType.VIDEO, true); @@ -144,7 +142,6 @@ describe('Publisher', () => { expect(state.localParticipant?.publishedTracks).not.toContain( TrackType.VIDEO, ); - expect(state.localParticipant?.videoDeviceId).toEqual('test-device-id-2'); }); it('can publish and un-pubish with just enabling and disabling tracks', async () => { @@ -178,7 +175,6 @@ describe('Publisher', () => { // initial publish await publisher.publishStream(mediaStream, track, TrackType.VIDEO); - expect(state.localParticipant?.videoDeviceId).toEqual('test-device-id'); expect(state.localParticipant?.publishedTracks).toContain(TrackType.VIDEO); expect(track.enabled).toBe(true); expect(state.localParticipant?.videoStream).toEqual(mediaStream); diff --git a/packages/client/src/rtc/flows/join.ts b/packages/client/src/rtc/flows/join.ts index 536ab21cc6..fdb17beb93 100644 --- a/packages/client/src/rtc/flows/join.ts +++ b/packages/client/src/rtc/flows/join.ts @@ -3,12 +3,7 @@ import { JoinCallRequest, JoinCallResponse, } from '../../gen/coordinator'; -import { - isStreamVideoLocalParticipant, - JoinCallData, - StreamVideoLocalParticipant, - StreamVideoParticipant, -} from '../../types'; +import { JoinCallData, StreamVideoParticipant } from '../../types'; import { StreamClient } from '../../coordinator/connection/client'; /** @@ -107,21 +102,11 @@ const getCascadingModeParams = () => { * @param source the participant to reconcile from. 
*/ export const reconcileParticipantLocalState = ( - target: StreamVideoParticipant | StreamVideoLocalParticipant, - source?: StreamVideoParticipant | StreamVideoLocalParticipant, + target: StreamVideoParticipant, + source?: StreamVideoParticipant, ) => { if (!source) return target; // copy everything from source to target - Object.assign(target, source); - - if ( - isStreamVideoLocalParticipant(source) && - isStreamVideoLocalParticipant(target) - ) { - target.audioDeviceId = source.audioDeviceId; - target.videoDeviceId = source.videoDeviceId; - target.audioOutputDeviceId = source.audioOutputDeviceId; - } - return target; + return Object.assign(target, source); }; diff --git a/packages/client/src/rtc/helpers/tracks.ts b/packages/client/src/rtc/helpers/tracks.ts index 4791711ce5..55da6aaedc 100644 --- a/packages/client/src/rtc/helpers/tracks.ts +++ b/packages/client/src/rtc/helpers/tracks.ts @@ -1,8 +1,5 @@ import { TrackType } from '../../gen/video/sfu/models/models'; -import type { - StreamVideoLocalParticipant, - StreamVideoParticipant, -} from '../../types'; +import type { StreamVideoParticipant } from '../../types'; import { TrackMuteType } from '../../types'; export const trackTypeToParticipantStreamKey = ( @@ -25,24 +22,6 @@ export const trackTypeToParticipantStreamKey = ( } }; -export const trackTypeToDeviceIdKey = ( - trackType: TrackType, -): keyof StreamVideoLocalParticipant | undefined => { - switch (trackType) { - case TrackType.AUDIO: - return 'audioDeviceId'; - case TrackType.VIDEO: - return 'videoDeviceId'; - case TrackType.SCREEN_SHARE: - case TrackType.SCREEN_SHARE_AUDIO: - case TrackType.UNSPECIFIED: - return undefined; - default: - const exhaustiveTrackTypeCheck: never = trackType; - throw new Error(`Unknown track type: ${exhaustiveTrackTypeCheck}`); - } -}; - export const muteTypeToTrackType = (muteType: TrackMuteType): TrackType => { switch (muteType) { case 'audio': diff --git a/packages/client/src/store/CallState.ts b/packages/client/src/store/CallState.ts index e02f20919e..3d09226ee4 100644 --- a/packages/client/src/store/CallState.ts +++ b/packages/client/src/store/CallState.ts @@ -3,8 +3,6 @@ import { distinctUntilChanged, map, shareReplay } from 'rxjs/operators'; import type { Patch } from './rxUtils'; import * as RxUtils from './rxUtils'; import { - isStreamVideoLocalParticipant, - StreamVideoLocalParticipant, StreamVideoParticipant, StreamVideoParticipantPatch, StreamVideoParticipantPatches, @@ -137,9 +135,9 @@ export class CallState { private startedAtSubject = new BehaviorSubject(undefined); private participantCountSubject = new BehaviorSubject(0); private anonymousParticipantCountSubject = new BehaviorSubject(0); - private participantsSubject = new BehaviorSubject< - (StreamVideoParticipant | StreamVideoLocalParticipant)[] - >([]); + private participantsSubject = new BehaviorSubject( + [], + ); private callStatsReportSubject = new BehaviorSubject< CallStatsReport | undefined >(undefined); @@ -167,9 +165,7 @@ export class CallState { /** * All participants of the current call (this includes the current user and other participants as well). */ - participants$: Observable< - (StreamVideoParticipant | StreamVideoLocalParticipant)[] - >; + participants$: Observable; /** * Remote participants of the current call (this includes every participant except the logged-in user). @@ -179,7 +175,7 @@ export class CallState { /** * The local participant of the current call (the logged-in user). 
*/ - localParticipant$: Observable; + localParticipant$: Observable; /** * Pinned participants of the current call. @@ -336,7 +332,7 @@ export class CallState { ); this.localParticipant$ = this.participants$.pipe( - map((participants) => participants.find(isStreamVideoLocalParticipant)), + map((participants) => participants.find((p) => p.isLocalParticipant)), shareReplay({ bufferSize: 1, refCount: true }), ); @@ -777,10 +773,7 @@ export class CallState { */ getParticipantLookupBySessionId = () => { return this.participants.reduce<{ - [sessionId: string]: - | StreamVideoParticipant - | StreamVideoLocalParticipant - | undefined; + [sessionId: string]: StreamVideoParticipant | undefined; }>((lookupTable, participant) => { lookupTable[participant.sessionId] = participant; return lookupTable; @@ -810,9 +803,7 @@ export class CallState { } const thePatch = typeof patch === 'function' ? patch(participant) : patch; - const updatedParticipant: - | StreamVideoParticipant - | StreamVideoLocalParticipant = { + const updatedParticipant: StreamVideoParticipant = { // FIXME OL: this is not a deep merge, we might want to revisit this ...participant, ...thePatch, diff --git a/packages/client/src/types.ts b/packages/client/src/types.ts index aceb3fa8d4..d412e5c9f5 100644 --- a/packages/client/src/types.ts +++ b/packages/client/src/types.ts @@ -90,31 +90,6 @@ export interface StreamVideoParticipant extends Participant { viewportVisibilityState?: Record; } -export interface StreamVideoLocalParticipant extends StreamVideoParticipant { - /** - * The device ID of the currently selected audio input device of the local participant (returned by the [MediaDevices API](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia)) - * - * @deprecated use call.microphone.state.selectedDevice - */ - audioDeviceId?: string; - - /** - * The device ID of the currently selected video input device of the local participant (returned by the [MediaDevices API](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia)) - * - * @deprecated use call.camera.state.selectedDevice - */ - videoDeviceId?: string; - - /** - * The device ID of the currently selected audio output device of the local participant (returned by the [MediaDevices API](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia)) - * - * If the value is not defined, the user hasn't selected any device (in these cases the default system audio output could be used) - * - * @deprecated use call.speaker.state.selectedDevice - */ - audioOutputDeviceId?: string; -} - export type VideoTrackType = 'videoTrack' | 'screenShareTrack'; export type AudioTrackType = 'audioTrack' | 'screenShareAudioTrack'; export type TrackMuteType = @@ -139,18 +114,10 @@ export type ParticipantPin = { pinnedAt: number; }; -export const isStreamVideoLocalParticipant = ( - p: StreamVideoParticipant | StreamVideoLocalParticipant, -): p is StreamVideoLocalParticipant => { - return !!p.isLocalParticipant; -}; - /** * A partial representation of the StreamVideoParticipant. */ -export type StreamVideoParticipantPatch = Partial< - StreamVideoParticipant | StreamVideoLocalParticipant ->; +export type StreamVideoParticipantPatch = Partial; /** * A collection of {@link StreamVideoParticipantPatch} organized by sessionId. 
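The `packages/client` hunks above retire the participant-level device fields (`audioDeviceId`, `videoDeviceId`, `audioOutputDeviceId`) and the deprecated `setAudioDevice` / `setVideoDevice` / `setAudioOutputDevice` helpers in favor of the `call.camera`, `call.microphone`, and `call.speaker` device managers. The sketch below is a hypothetical illustration, not part of this patch, of how an integrator might drive the managers directly after the change; it only uses members that appear elsewhere in this diff (`select`, `toggle`, `listDevices`, `setVolume`, `state.selectedDevice$`, `state.isDeviceSelectionSupported`), while the construction of the `Call` instance and the device ids are assumed placeholders.

```ts
import type { Call } from '@stream-io/video-client';

// Assumption: `call` is an already created and joined Call instance;
// obtaining it from a StreamVideoClient is out of scope for this sketch.
declare const call: Call;

export async function switchDevices() {
  // Device enumeration now lives on the managers; listDevices() returns an RxJS Observable.
  const devicesSub = call.microphone.listDevices().subscribe((devices) => {
    console.log('available microphones:', devices);
  });

  // Selection replaces the removed participant-state setters (placeholder device ids).
  await call.camera.select('camera-device-id');
  await call.microphone.select('microphone-device-id');
  if (call.speaker.state.isDeviceSelectionSupported) {
    call.speaker.select('speaker-device-id');
  }
  call.speaker.setVolume(0.5);

  // Publishing is toggled through the managers; the current selection is exposed as an observable.
  await call.camera.toggle();
  const selectionSub = call.camera.state.selectedDevice$.subscribe((deviceId) => {
    console.log('camera in use:', deviceId);
  });

  devicesSub.unsubscribe();
  selectionSub.unsubscribe();
}
```

The `react-bindings` and `react-sdk` hunks that follow expose these same managers to React components through `useCameraState`, `useMicrophoneState`, `useSpeakerState`, and `useScreenShareState`, which is what allows the `MediaDevicesProvider` and its publisher hooks to be removed later in this diff.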
diff --git a/packages/react-bindings/src/hooks/callStateHooks.ts b/packages/react-bindings/src/hooks/callStateHooks.ts index a70751ae01..141b5209a5 100644 --- a/packages/react-bindings/src/hooks/callStateHooks.ts +++ b/packages/react-bindings/src/hooks/callStateHooks.ts @@ -330,12 +330,24 @@ export const useCameraState = () => { const call = useCall(); const { camera } = call as Call; + const devices$ = useMemo(() => camera.listDevices(), [camera]); + const status = useObservableValue(camera.state.status$); const direction = useObservableValue(camera.state.direction$); + const mediaStream = useObservableValue(camera.state.mediaStream$); + const selectedDevice = useObservableValue(camera.state.selectedDevice$); + const devices = useObservableValue(devices$); + const isMute = status !== 'enabled'; return { + camera, status, + isEnabled: status === 'enabled', direction, + mediaStream, + devices, + selectedDevice, + isMute, }; }; @@ -348,22 +360,62 @@ export const useMicrophoneState = () => { const call = useCall(); const { microphone } = call as Call; - const status = useObservableValue(microphone.state.status$); - const selectedDevice = useObservableValue(microphone.state.selectedDevice$); + const devices$ = useMemo(() => microphone.listDevices(), [microphone]); + + const { state } = microphone; + const status = useObservableValue(state.status$); + const mediaStream = useObservableValue(state.mediaStream$); + const selectedDevice = useObservableValue(state.selectedDevice$); + const devices = useObservableValue(devices$); + const isSpeakingWhileMuted = useObservableValue(state.speakingWhileMuted$); + const isMute = status !== 'enabled'; return { + microphone, status, + isEnabled: status === 'enabled', + mediaStream, + devices, selectedDevice, + isSpeakingWhileMuted, + isMute, }; }; +/** + * Returns the speaker state of the current call. + */ +export const useSpeakerState = () => { + const call = useCall(); + const { speaker } = call as Call; + + const devices$ = useMemo(() => speaker.listDevices(), [speaker]); + const devices = useObservableValue(devices$); + const selectedDevice = useObservableValue(speaker.state.selectedDevice$); + + return { + speaker, + devices, + selectedDevice, + isDeviceSelectionSupported: speaker.state.isDeviceSelectionSupported, + }; +}; + +/** + * Returns the Screen Share state of the current call. 
+ */ export const useScreenShareState = () => { const call = useCall(); const { screenShare } = call as Call; const status = useObservableValue(screenShare.state.status$); + const mediaStream = useObservableValue(screenShare.state.mediaStream$); + const isMute = status !== 'enabled'; return { + screenShare, + mediaStream, status, + isMute, }; }; diff --git a/packages/react-bindings/tsconfig.json b/packages/react-bindings/tsconfig.json index e0eb139397..f05c59db4d 100644 --- a/packages/react-bindings/tsconfig.json +++ b/packages/react-bindings/tsconfig.json @@ -3,7 +3,7 @@ "outDir": "./dist", "module": "ES2015", "target": "ES2015", - "lib": ["esnext"], + "lib": ["esnext", "dom"], "declaration": true, "moduleResolution": "node", "allowSyntheticDefaultImports": true, diff --git a/packages/react-sdk/index.ts b/packages/react-sdk/index.ts index e01bd6b18f..b913a29995 100644 --- a/packages/react-sdk/index.ts +++ b/packages/react-sdk/index.ts @@ -13,8 +13,6 @@ export * from './src/translations'; export { useHorizontalScrollPosition, useVerticalScrollPosition, - useToggleAudioMuteState, - useToggleVideoMuteState, } from './src/hooks'; const [major, minor, patch] = version.split('.'); diff --git a/packages/react-sdk/src/components/CallControls/ScreenShareButton.tsx b/packages/react-sdk/src/components/CallControls/ScreenShareButton.tsx index 189d840bac..d8d88f09df 100644 --- a/packages/react-sdk/src/components/CallControls/ScreenShareButton.tsx +++ b/packages/react-sdk/src/components/CallControls/ScreenShareButton.tsx @@ -1,29 +1,27 @@ import { OwnCapability } from '@stream-io/video-client'; import { Restricted, - useCall, useCallStateHooks, useI18n, } from '@stream-io/video-react-bindings'; import { CompositeButton, IconButton } from '../Button/'; import { PermissionNotification } from '../Notification'; -import { useToggleScreenShare } from '../../hooks'; +import { useRequestPermission } from '../../hooks'; export type ScreenShareButtonProps = { caption?: string; }; -export const ScreenShareButton = ({ - caption = 'Screen Share', -}: ScreenShareButtonProps) => { - const call = useCall(); - const { useHasOngoingScreenShare } = useCallStateHooks(); - const isSomeoneScreenSharing = useHasOngoingScreenShare(); - +export const ScreenShareButton = (props: ScreenShareButtonProps) => { const { t } = useI18n(); - const { toggleScreenShare, isAwaitingPermission, isScreenSharing } = - useToggleScreenShare(); + const { caption = t('Screen Share') } = props; + + const { useHasOngoingScreenShare, useScreenShareState } = useCallStateHooks(); + const isSomeoneScreenSharing = useHasOngoingScreenShare(); + const { hasPermission, requestPermission, isAwaitingPermission } = + useRequestPermission(OwnCapability.SCREENSHARE); + const { screenShare, isMute: isScreenSharing } = useScreenShareState(); return ( { + if (!hasPermission) { + await requestPermission(); + } else { + screenShare.toggle(); + } + }} /> diff --git a/packages/react-sdk/src/components/CallControls/ToggleAudioButton.tsx b/packages/react-sdk/src/components/CallControls/ToggleAudioButton.tsx index 1ebd4095a2..eba02eff81 100644 --- a/packages/react-sdk/src/components/CallControls/ToggleAudioButton.tsx +++ b/packages/react-sdk/src/components/CallControls/ToggleAudioButton.tsx @@ -1,16 +1,14 @@ import { ComponentType } from 'react'; -import { OwnCapability, SfuModels } from '@stream-io/video-client'; +import { OwnCapability } from '@stream-io/video-client'; import { Restricted, useCallStateHooks, useI18n, } from '@stream-io/video-react-bindings'; - -import 
{ useMediaDevices } from '../../core'; import { DeviceSelectorAudioInput } from '../DeviceSettings'; import { CompositeButton, IconButton } from '../Button'; import { PermissionNotification } from '../Notification'; -import { useToggleAudioMuteState } from '../../hooks'; +import { useRequestPermission } from '../../hooks'; export type ToggleAudioPreviewButtonProps = { caption?: string; @@ -20,20 +18,17 @@ export type ToggleAudioPreviewButtonProps = { export const ToggleAudioPreviewButton = ( props: ToggleAudioPreviewButtonProps, ) => { - const { initialAudioEnabled, toggleInitialAudioMuteState } = - useMediaDevices(); const { t } = useI18n(); const { caption = t('Mic'), Menu = DeviceSelectorAudioInput } = props; + const { useMicrophoneState } = useCallStateHooks(); + const { microphone, isMute } = useMicrophoneState(); + return ( - + microphone.toggle()} /> ); @@ -47,18 +42,14 @@ export type ToggleAudioPublishingButtonProps = { export const ToggleAudioPublishingButton = ( props: ToggleAudioPublishingButtonProps, ) => { - const { useLocalParticipant } = useCallStateHooks(); - const localParticipant = useLocalParticipant(); const { t } = useI18n(); - const { caption = t('Mic'), Menu = DeviceSelectorAudioInput } = props; - const isAudioMute = !localParticipant?.publishedTracks.includes( - SfuModels.TrackType.AUDIO, - ); + const { hasPermission, requestPermission, isAwaitingPermission } = + useRequestPermission(OwnCapability.SEND_AUDIO); - const { toggleAudioMuteState: handleClick, isAwaitingPermission } = - useToggleAudioMuteState(); + const { useMicrophoneState } = useCallStateHooks(); + const { microphone, isMute } = useMicrophoneState(); return ( @@ -69,10 +60,16 @@ export const ToggleAudioPublishingButton = ( messageAwaitingApproval={t('Awaiting for an approval to speak.')} messageRevoked={t('You can no longer speak.')} > - + { + if (!hasPermission) { + await requestPermission(); + } else { + microphone.toggle(); + } + }} /> diff --git a/packages/react-sdk/src/components/CallControls/ToggleVideoButton.tsx b/packages/react-sdk/src/components/CallControls/ToggleVideoButton.tsx index c810c18d9c..c37f8bf14c 100644 --- a/packages/react-sdk/src/components/CallControls/ToggleVideoButton.tsx +++ b/packages/react-sdk/src/components/CallControls/ToggleVideoButton.tsx @@ -5,12 +5,11 @@ import { useI18n, } from '@stream-io/video-react-bindings'; -import { OwnCapability, SfuModels } from '@stream-io/video-client'; +import { OwnCapability } from '@stream-io/video-client'; import { CompositeButton, IconButton } from '../Button/'; -import { useMediaDevices } from '../../core'; import { DeviceSelectorVideo } from '../DeviceSettings'; import { PermissionNotification } from '../Notification'; -import { useToggleVideoMuteState } from '../../hooks'; +import { useRequestPermission } from '../../hooks'; export type ToggleVideoPreviewButtonProps = { caption?: string; @@ -20,19 +19,17 @@ export type ToggleVideoPreviewButtonProps = { export const ToggleVideoPreviewButton = ( props: ToggleVideoPreviewButtonProps, ) => { - const { toggleInitialVideoMuteState, initialVideoState } = useMediaDevices(); const { t } = useI18n(); const { caption = t('Video'), Menu = DeviceSelectorVideo } = props; + const { useCameraState } = useCallStateHooks(); + const { camera, isMute } = useCameraState(); + return ( - + camera.toggle()} /> ); @@ -46,18 +43,14 @@ type ToggleVideoPublishingButtonProps = { export const ToggleVideoPublishingButton = ( props: ToggleVideoPublishingButtonProps, ) => { - const { useLocalParticipant } = 
useCallStateHooks(); - const localParticipant = useLocalParticipant(); const { t } = useI18n(); - const { caption = t('Video'), Menu = DeviceSelectorVideo } = props; - const isVideoMute = !localParticipant?.publishedTracks.includes( - SfuModels.TrackType.VIDEO, - ); + const { hasPermission, requestPermission, isAwaitingPermission } = + useRequestPermission(OwnCapability.SEND_VIDEO); - const { toggleVideoMuteState: handleClick, isAwaitingPermission } = - useToggleVideoMuteState(); + const { useCameraState } = useCallStateHooks(); + const { camera, isMute } = useCameraState(); return ( @@ -70,10 +63,16 @@ export const ToggleVideoPublishingButton = ( )} messageRevoked={t('You can no longer share your video.')} > - + { + if (!hasPermission) { + await requestPermission(); + } else { + camera.toggle(); + } + }} /> diff --git a/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorAudio.tsx b/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorAudio.tsx index eb5e4fe267..5207dc9863 100644 --- a/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorAudio.tsx +++ b/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorAudio.tsx @@ -1,28 +1,25 @@ +import { useCallStateHooks, useI18n } from '@stream-io/video-react-bindings'; import { DeviceSelector } from './DeviceSelector'; -import { - useMediaDevices, - useAudioInputDevices, - useAudioOutputDevices, -} from '../../core'; export type DeviceSelectorAudioInputProps = { title?: string; }; export const DeviceSelectorAudioInput = ({ - title = 'Select a Mic', + title, }: DeviceSelectorAudioInputProps) => { - const { selectedAudioInputDeviceId, switchDevice } = useMediaDevices(); - const audioInputDevices = useAudioInputDevices(); + const { t } = useI18n(); + const { useMicrophoneState } = useCallStateHooks(); + const { microphone, selectedDevice, devices } = useMicrophoneState(); return ( { - switchDevice('audioinput', deviceId); + devices={devices || []} + selectedDeviceId={selectedDevice} + onChange={async (deviceId) => { + await microphone.select(deviceId); }} - title={title} + title={title || t('Select a Mic')} /> ); }; @@ -32,26 +29,23 @@ export type DeviceSelectorAudioOutputProps = { }; export const DeviceSelectorAudioOutput = ({ - title = 'Select Speakers', + title, }: DeviceSelectorAudioOutputProps) => { - const { - isAudioOutputChangeSupported, - selectedAudioOutputDeviceId, - switchDevice, - } = useMediaDevices(); + const { t } = useI18n(); + const { useSpeakerState } = useCallStateHooks(); + const { speaker, selectedDevice, devices, isDeviceSelectionSupported } = + useSpeakerState(); - const audioOutputDevices = useAudioOutputDevices(); - - if (!isAudioOutputChangeSupported) return null; + if (!isDeviceSelectionSupported) return null; return ( { - switchDevice('audiooutput', deviceId); + devices={devices} + selectedDeviceId={selectedDevice} + onChange={async (deviceId) => { + speaker.select(deviceId); }} - title={title} + title={title || t('Select Speakers')} /> ); }; diff --git a/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorVideo.tsx b/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorVideo.tsx index 42767013a6..109f8df4c5 100644 --- a/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorVideo.tsx +++ b/packages/react-sdk/src/components/DeviceSettings/DeviceSelectorVideo.tsx @@ -1,22 +1,23 @@ import { DeviceSelector } from './DeviceSelector'; -import { useMediaDevices, useVideoDevices } from '../../core'; +import { useCallStateHooks, useI18n } from 
'@stream-io/video-react-bindings'; export type DeviceSelectorVideoProps = { title?: string; }; export const DeviceSelectorVideo = ({ title }: DeviceSelectorVideoProps) => { - const { selectedVideoDeviceId, switchDevice } = useMediaDevices(); - const videoDevices = useVideoDevices(); + const { t } = useI18n(); + const { useCameraState } = useCallStateHooks(); + const { camera, devices, selectedDevice } = useCameraState(); return ( { - switchDevice('videoinput', deviceId); + devices={devices || []} + selectedDeviceId={selectedDevice} + onChange={async (deviceId) => { + await camera.select(deviceId); }} - title={title || 'Select a Camera'} + title={title || t('Select a Camera')} /> ); }; diff --git a/packages/react-sdk/src/components/Notification/SpeakingWhileMutedNotification.tsx b/packages/react-sdk/src/components/Notification/SpeakingWhileMutedNotification.tsx index 2e3be6bedd..4ce3155e12 100644 --- a/packages/react-sdk/src/components/Notification/SpeakingWhileMutedNotification.tsx +++ b/packages/react-sdk/src/components/Notification/SpeakingWhileMutedNotification.tsx @@ -1,69 +1,48 @@ import { PropsWithChildren, useEffect, useState } from 'react'; -import { createSoundDetector, SfuModels } from '@stream-io/video-client'; import { useCallStateHooks, useI18n } from '@stream-io/video-react-bindings'; - -import { useMediaDevices } from '../../core'; import { Notification } from './Notification'; export type SpeakingWhileMutedNotificationProps = { - /* - Text message displayed by the notification. + /** + * Text message displayed by the notification. */ text?: string; + + /** + * Duration in milliseconds for which the notification should be displayed. + * Default is 3500ms. + */ + displayDuration?: number; }; export const SpeakingWhileMutedNotification = ({ children, text, + displayDuration = 3500, }: PropsWithChildren) => { - const { useLocalParticipant } = useCallStateHooks(); - const localParticipant = useLocalParticipant(); - const { getAudioStream } = useMediaDevices(); + const { useMicrophoneState } = useCallStateHooks(); + const { isSpeakingWhileMuted } = useMicrophoneState(); const { t } = useI18n(); - const message = text ?? t('You are muted. Unmute to speak.'); - const isAudioMute = !localParticipant?.publishedTracks.includes( - SfuModels.TrackType.AUDIO, - ); - const audioDeviceId = localParticipant?.audioDeviceId; - const [isSpeakingWhileMuted, setIsSpeakingWhileMuted] = useState(false); - useEffect(() => { - // do nothing when not muted - if (!isAudioMute) return; - const disposeSoundDetector = getAudioStream({ - deviceId: audioDeviceId, - }).then((audioStream) => - createSoundDetector(audioStream, ({ isSoundDetected }) => { - setIsSpeakingWhileMuted((isNotified) => - isNotified ? 
isNotified : isSoundDetected, - ); - }), - ); - disposeSoundDetector.catch((err) => { - console.error('Error while creating sound detector', err); - }); - return () => { - disposeSoundDetector - .then((dispose) => dispose()) - .catch((err) => { - console.error('Error while disposing sound detector', err); - }); - setIsSpeakingWhileMuted(false); - }; - }, [audioDeviceId, getAudioStream, isAudioMute]); + const [showNotification, setShowNotification] = useState(false); + if (!showNotification && isSpeakingWhileMuted) { + setShowNotification(true); + } useEffect(() => { - if (!isSpeakingWhileMuted) return; + if (!showNotification) return; const timeout = setTimeout(() => { - setIsSpeakingWhileMuted(false); - }, 3500); + setShowNotification(false); + }, displayDuration); return () => { clearTimeout(timeout); - setIsSpeakingWhileMuted(false); + setShowNotification(false); }; - }, [isSpeakingWhileMuted]); + }, [showNotification, displayDuration]); + + const message = text ?? t('You are muted. Unmute to speak.'); return ( - + {children} ); diff --git a/packages/react-sdk/src/components/Video/VideoPreview.tsx b/packages/react-sdk/src/components/Video/VideoPreview.tsx deleted file mode 100644 index c51b69f74d..0000000000 --- a/packages/react-sdk/src/components/Video/VideoPreview.tsx +++ /dev/null @@ -1,152 +0,0 @@ -import { - ComponentType, - ReactEventHandler, - useCallback, - useEffect, - useState, -} from 'react'; -import clsx from 'clsx'; -import { disposeOfMediaStream } from '@stream-io/video-client'; -import { BaseVideo } from '../../core/components/Video'; -import { - DEVICE_STATE, - useMediaDevices, - useOnUnavailableVideoDevices, - useVideoDevices, -} from '../../core'; -import { LoadingIndicator } from '../LoadingIndicator'; - -const DefaultDisabledVideoPreview = () => { - return
Video is disabled
; -}; - -const DefaultNoCameraPreview = () => { - return
No camera found
; -}; - -type VideoErrorPreviewProps = { - message?: string; -}; -const DefaultVideoErrorPreview = ({ message }: VideoErrorPreviewProps) => { - return ( - <> -
Error:
-

{message || 'Unexpected error happened'}

- - ); -}; - -export type VideoPreviewProps = { - /** - * Component rendered when user turns off the video. - */ - DisabledVideoPreview?: ComponentType; - /** - * Enforces mirroring of the video on the X axis. Defaults to true. - */ - mirror?: boolean; - /** - * Component rendered when no camera devices are available. - */ - NoCameraPreview?: ComponentType; - /** - * Component rendered above the BaseVideo until the video is ready (meaning until the play event is emitted). - */ - StartingCameraPreview?: ComponentType; - /** - * Component rendered when the video stream could not be retrieved. - */ - VideoErrorPreview?: ComponentType; -}; - -export const VideoPreview = ({ - mirror = true, - DisabledVideoPreview = DefaultDisabledVideoPreview, - NoCameraPreview = DefaultNoCameraPreview, - StartingCameraPreview = LoadingIndicator, - VideoErrorPreview = DefaultVideoErrorPreview, -}: VideoPreviewProps) => { - const [stream, setStream] = useState(); - const { - selectedVideoDeviceId, - getVideoStream, - initialVideoState, - setInitialVideoState, - } = useMediaDevices(); - // When there are 0 video devices (e.g. when laptop lid closed), - // we do not restart the video automatically when the device is again available, - // but rather leave turning the video on manually to the user. - useOnUnavailableVideoDevices(() => - setInitialVideoState(DEVICE_STATE.stopped), - ); - const videoDevices = useVideoDevices(); - - useEffect(() => { - if (!initialVideoState.enabled) return; - - getVideoStream({ deviceId: selectedVideoDeviceId }) - .then((s) => { - setStream((previousStream) => { - if (previousStream) { - disposeOfMediaStream(previousStream); - } - return s; - }); - }) - .catch((e) => - setInitialVideoState({ - ...DEVICE_STATE.error, - message: (e as Error).message, - }), - ); - return () => { - setStream(undefined); - }; - }, [ - initialVideoState, - getVideoStream, - selectedVideoDeviceId, - setInitialVideoState, - videoDevices.length, - ]); - - useEffect(() => { - if (initialVideoState.type === 'stopped') { - setStream(undefined); - } - }, [initialVideoState]); - - const handleOnPlay: ReactEventHandler = useCallback(() => { - setInitialVideoState(DEVICE_STATE.playing); - }, [setInitialVideoState]); - - let contents; - if (initialVideoState.type === 'error') { - contents = ; - } else if (initialVideoState.type === 'stopped' && !videoDevices.length) { - contents = ; - } else if (initialVideoState.enabled) { - const loading = initialVideoState.type === 'starting'; - contents = ( - <> - {stream && ( - - )} - {loading && } - - ); - } else { - contents = ; - } - - return ( -
{contents}
- ); -}; diff --git a/packages/react-sdk/src/components/VideoPreview/VideoPreview.tsx b/packages/react-sdk/src/components/VideoPreview/VideoPreview.tsx new file mode 100644 index 0000000000..02adee621f --- /dev/null +++ b/packages/react-sdk/src/components/VideoPreview/VideoPreview.tsx @@ -0,0 +1,67 @@ +import { ComponentType } from 'react'; +import clsx from 'clsx'; +import { useCallStateHooks } from '@stream-io/video-react-bindings'; +import { BaseVideo } from '../../core/components/Video'; +import { LoadingIndicator } from '../LoadingIndicator'; + +const DefaultDisabledVideoPreview = () => { + return
Video is disabled
; +}; + +const DefaultNoCameraPreview = () => { + return
No camera found
; +}; + +export type VideoPreviewProps = { + /** + * Component rendered when user turns off the video. + */ + DisabledVideoPreview?: ComponentType; + /** + * Enforces mirroring of the video on the X axis. Defaults to true. + */ + mirror?: boolean; + /** + * Component rendered when no camera devices are available. + */ + NoCameraPreview?: ComponentType; + /** + * Component rendered above the BaseVideo until the video is ready (meaning until the play event is emitted). + */ + StartingCameraPreview?: ComponentType; +}; + +export const VideoPreview = ({ + mirror = true, + DisabledVideoPreview = DefaultDisabledVideoPreview, + NoCameraPreview = DefaultNoCameraPreview, + StartingCameraPreview = LoadingIndicator, +}: VideoPreviewProps) => { + const { useCameraState } = useCallStateHooks(); + const { devices, status, isMute, mediaStream } = useCameraState(); + + let contents; + if (isMute && devices?.length === 0) { + contents = ; + } else if (status === 'enabled') { + const loading = !mediaStream; + contents = ( + <> + {mediaStream && ( + + )} + {loading && } + + ); + } else { + contents = ; + } + + return
{contents}
; +}; diff --git a/packages/react-sdk/src/components/Video/index.ts b/packages/react-sdk/src/components/VideoPreview/index.ts similarity index 100% rename from packages/react-sdk/src/components/Video/index.ts rename to packages/react-sdk/src/components/VideoPreview/index.ts diff --git a/packages/react-sdk/src/components/index.ts b/packages/react-sdk/src/components/index.ts index 6040d7e209..2fddcc4fd2 100644 --- a/packages/react-sdk/src/components/index.ts +++ b/packages/react-sdk/src/components/index.ts @@ -14,4 +14,4 @@ export * from './Permissions'; export * from './StreamTheme'; export * from './Search'; export * from './Tooltip'; -export * from './Video'; +export * from './VideoPreview'; diff --git a/packages/react-sdk/src/core/components/CallLayout/PaginatedGridLayout.tsx b/packages/react-sdk/src/core/components/CallLayout/PaginatedGridLayout.tsx index 1b220095f6..838df530df 100644 --- a/packages/react-sdk/src/core/components/CallLayout/PaginatedGridLayout.tsx +++ b/packages/react-sdk/src/core/components/CallLayout/PaginatedGridLayout.tsx @@ -1,9 +1,6 @@ import { useEffect, useMemo, useState } from 'react'; import { useCall, useCallStateHooks } from '@stream-io/video-react-bindings'; -import { - StreamVideoLocalParticipant, - StreamVideoParticipant, -} from '@stream-io/video-client'; +import { StreamVideoParticipant } from '@stream-io/video-client'; import clsx from 'clsx'; import { @@ -22,7 +19,7 @@ type PaginatedGridLayoutGroupProps = { /** * The group of participants to render. */ - group: Array; + group: Array; } & Pick & Required>; diff --git a/packages/react-sdk/src/core/components/ParticipantView/DefaultParticipantViewUI.tsx b/packages/react-sdk/src/core/components/ParticipantView/DefaultParticipantViewUI.tsx index 111faffcd5..6a4ba4e2f7 100644 --- a/packages/react-sdk/src/core/components/ParticipantView/DefaultParticipantViewUI.tsx +++ b/packages/react-sdk/src/core/components/ParticipantView/DefaultParticipantViewUI.tsx @@ -1,7 +1,7 @@ import { forwardRef } from 'react'; import { Placement } from '@floating-ui/react'; import { SfuModels } from '@stream-io/video-client'; -import { useCall } from '@stream-io/video-react-bindings'; +import { useCall, useI18n } from '@stream-io/video-react-bindings'; import { clsx } from 'clsx'; import { @@ -42,22 +42,23 @@ const ToggleButton = forwardRef( export const DefaultScreenShareOverlay = () => { const call = useCall(); + const { t } = useI18n(); const stopScreenShare = () => { - call?.stopPublish(SfuModels.TrackType.SCREEN_SHARE).catch(console.error); + call?.screenShare.disable(); }; return (
- You are presenting your screen + {t('You are presenting your screen')}
); diff --git a/packages/react-sdk/src/core/components/ParticipantView/ParticipantView.tsx b/packages/react-sdk/src/core/components/ParticipantView/ParticipantView.tsx index c663a97107..a6b6981561 100644 --- a/packages/react-sdk/src/core/components/ParticipantView/ParticipantView.tsx +++ b/packages/react-sdk/src/core/components/ParticipantView/ParticipantView.tsx @@ -10,7 +10,6 @@ import { import clsx from 'clsx'; import { SfuModels, - StreamVideoLocalParticipant, StreamVideoParticipant, VideoTrackType, } from '@stream-io/video-client'; @@ -40,7 +39,7 @@ export type ParticipantViewProps = { /** * The participant whose video/audio stream we want to play. */ - participant: StreamVideoParticipant | StreamVideoLocalParticipant; + participant: StreamVideoParticipant; /** * Override the default UI for rendering participant information/actions. diff --git a/packages/react-sdk/src/core/components/StreamCall/StreamCall.tsx b/packages/react-sdk/src/core/components/StreamCall/StreamCall.tsx index 2cb8fc71ee..a8a371cd18 100644 --- a/packages/react-sdk/src/core/components/StreamCall/StreamCall.tsx +++ b/packages/react-sdk/src/core/components/StreamCall/StreamCall.tsx @@ -1,10 +1,6 @@ import { PropsWithChildren } from 'react'; import { Call } from '@stream-io/video-client'; import { StreamCallProvider } from '@stream-io/video-react-bindings'; -import { - MediaDevicesProvider, - MediaDevicesProviderProps, -} from '../../contexts'; export type StreamCallProps = { call: Call; @@ -12,7 +8,7 @@ export type StreamCallProps = { /** * An optional props to pass to the `MediaDevicesProvider`. */ - mediaDevicesProviderProps?: MediaDevicesProviderProps; + mediaDevicesProviderProps?: any; }; export const StreamCall = ({ @@ -20,11 +16,8 @@ export const StreamCall = ({ call, mediaDevicesProviderProps, }: PropsWithChildren) => { - return ( - - - {children} - - - ); + if (mediaDevicesProviderProps) { + console.warn('mediaDevicesProviderProps is deprecated'); + } + return {children}; }; diff --git a/packages/react-sdk/src/core/contexts/MediaDevicesContext.tsx b/packages/react-sdk/src/core/contexts/MediaDevicesContext.tsx deleted file mode 100644 index 6da41fc02f..0000000000 --- a/packages/react-sdk/src/core/contexts/MediaDevicesContext.tsx +++ /dev/null @@ -1,416 +0,0 @@ -import { - createContext, - PropsWithChildren, - useCallback, - useContext, - useEffect, - useState, -} from 'react'; -import { map } from 'rxjs'; -import { - CallingState, - checkIfAudioOutputChangeSupported, - disposeOfMediaStream, - getAudioStream, - getVideoStream, - SfuModels, - watchForDisconnectedAudioOutputDevice, -} from '@stream-io/video-client'; -import { useCall, useCallStateHooks } from '@stream-io/video-react-bindings'; - -import { - useAudioInputDeviceFallback, - useAudioOutputDeviceFallback, - useAudioPublisher, - useHasBrowserPermissions, - useVideoDeviceFallback, - useVideoPublisher, -} from '../hooks'; - -type EnabledStateType = 'starting' | 'playing'; -type DisabledStateType = 'uninitialized' | 'stopped'; -type ErrorStateType = 'error'; -type DeviceStateType = EnabledStateType | DisabledStateType | ErrorStateType; - -type EnabledDeviceState = { - type: T; - enabled: true; -}; -type DisabledDeviceState = { - type: T; - enabled: false; -}; -type ErrorDeviceState = { - type: 'error'; - message: string; - enabled: false; -}; - -type DeviceState = - | EnabledDeviceState - | DisabledDeviceState - | ErrorDeviceState; - -const DEVICE_STATE_TOGGLE: Record = { - starting: 'stopped', - playing: 'stopped', - stopped: 'starting', - uninitialized: 
'starting', - error: 'starting', -}; - -/** - * Exclude types from documentation site, but we should still add doc comments - * @internal - */ -export const DEVICE_STATE: { - starting: EnabledDeviceState<'starting'>; - playing: EnabledDeviceState<'playing'>; - stopped: DisabledDeviceState<'stopped'>; - uninitialized: DisabledDeviceState<'uninitialized'>; - error: ErrorDeviceState; -} = { - starting: { type: 'starting', enabled: true }, - playing: { type: 'playing', enabled: true }, - stopped: { type: 'stopped', enabled: false }, - uninitialized: { type: 'uninitialized', enabled: false }, - error: { type: 'error', message: '', enabled: false }, -}; - -const DEFAULT_DEVICE_ID = 'default'; - -/** - * API to control device enablement, device selection and media stream access for a call. - * @category Device Management - */ -export type MediaDevicesContextAPI = { - /** - * Deactivates MediaStream (stops and removes tracks) to be later garbage collected - * - * @param stream MediaStream - * @returns void - */ - disposeOfMediaStream: (stream: MediaStream) => void; - /** - * Returns an 'audioinput' media stream with the given `deviceId`, if no `deviceId` is provided, it uses the first available device. - * - * @param deviceId - * @returns - */ - getAudioStream: typeof getAudioStream; - /** - * Returns a 'videoinput' media stream with the given `deviceId`, if no `deviceId` is provided, it uses the first available device. - * - * @param deviceId - * @returns - */ - getVideoStream: typeof getVideoStream; - /** - * [Tells if the browser supports audio output change on 'audio' elements](https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/setSinkId). - */ - isAudioOutputChangeSupported: boolean; - /** - * Signals whether audio stream will be published when the call is joined. - */ - initialAudioEnabled: boolean; - /** - * Signals whether audio stream will be published when the call is joined. - */ - initialVideoState: DeviceState; - /** - * Publishes audio stream for currently selected audio input (microphone) device to other call participants. - */ - publishAudioStream: () => Promise; - /** - * Publishes video stream for currently selected video input (camera) device to other call participants. - */ - publishVideoStream: () => Promise; - /** - * Stops publishing audio stream for currently selected audio input (microphone) device to other call participants. - */ - stopPublishingAudio: () => Promise; - /** - * Stops publishing video stream for currently selected video input (camera) device to other call participants. - */ - stopPublishingVideo: () => Promise; - /** - * Sets the initialAudioEnabled flag to a given boolean value. - * The latest value set will be used to decide, whether audio stream will be published when joining a call. - * @param enabled - */ - setInitialAudioEnabled: (enabled: boolean) => void; - - /** - * Sets the initialVideoState to a given DeviceState value. - * The latest value set will be used to decide, whether video stream will be published when joining a call. - * @param enabled - */ - setInitialVideoState: (state: DeviceState) => void; - /** - * Stores audio input device (microphone) id which is used to publish user's sound to other call participants. - */ - selectedAudioInputDeviceId?: string; - /** - * Stores audio output device (speaker) id used to reproduce incoming audio from other call participants. - */ - selectedAudioOutputDeviceId?: string; - /** - * Stores video input device (camera) id which is used to publish user's video to other call participants. 
- */ - selectedVideoDeviceId?: string; - /** - * Function should be used to change selected device id. - * The change is later reflected in selectedAudioInputDeviceId, selectedAudioOutputDeviceId or selectedVideoDeviceId depending on kind parameter. - * @param kind - * @param deviceId - */ - switchDevice: (kind: MediaDeviceKind, deviceId?: string) => void; - /** - * Sets the initialAudioEnabled flag by negating the current state value. - * The latest value set will be used to decide, whether audio stream will be published when joining a call. - * @param enabled - */ - toggleInitialAudioMuteState: () => void; - /** - * Sets the initialVideoState by toggling the current state DeviceState value. - * The latest value set will be used to decide, whether video stream will be published when joining a call. - * @param enabled - */ - toggleInitialVideoMuteState: () => void; -}; - -const MediaDevicesContext = createContext(null); - -/** - * Configuration parameters for MediaDevicesProvider. - * @category Device Management - */ -export type MediaDevicesProviderProps = { - /** - * Provides external control over the initial audio input (microphone) enablement. Overrides the default false. - */ - initialAudioEnabled?: boolean; - /** - * Provides external control over the initial video input (camera) enablement. Overrides the default false. - */ - initialVideoEnabled?: boolean; - /** - * Allows to override the default audio input (microphone) stream to be published. Overrides the default string 'default'. - */ - initialAudioInputDeviceId?: string; - /** - * Allows to override the default audio output (speaker) device to reproduce incoming audio from the SFU. Overrides the default string 'default'. - */ - initialAudioOutputDeviceId?: string; - /** - * Allows to override the default video input (camera) stream to be published. Overrides the default string 'default'. - */ - initialVideoInputDeviceId?: string; -}; - -/** - * Context provider that internally puts in place mechanisms that: - * 1. fall back to selecting a default device when trying to switch to a non-existent device - * 2. fall back to a default device when an active device is disconnected - * 3. stop publishing a media stream when a non-default device is disconnected - * 4. republish a media stream from the newly connected default device - * 5. republish a media stream when a new device is selected - * - * Provides `MediaDevicesContextAPI` that allow the integrators to handle: - * 1. the initial device state enablement (for example apt for lobby scenario) - * 2. media stream retrieval and disposal - * 3. media stream publishing - * 4. 
specific device selection - * @param params - * @returns - * - * @category Device Management - */ -export const MediaDevicesProvider = ({ - children, - initialAudioEnabled, - initialVideoEnabled, - initialVideoInputDeviceId = DEFAULT_DEVICE_ID, - initialAudioOutputDeviceId = DEFAULT_DEVICE_ID, - initialAudioInputDeviceId = DEFAULT_DEVICE_ID, -}: PropsWithChildren) => { - const call = useCall(); - const { useCallCallingState, useCallState, useCallSettings } = - useCallStateHooks(); - const callingState = useCallCallingState(); - const callState = useCallState(); - const { localParticipant$ } = callState; - const hasBrowserPermissionVideoInput = useHasBrowserPermissions( - 'camera' as PermissionName, - ); - const hasBrowserPermissionAudioInput = useHasBrowserPermissions( - 'microphone' as PermissionName, - ); - const [selectedAudioInputDeviceId, selectAudioInputDeviceId] = useState< - MediaDevicesContextAPI['selectedAudioInputDeviceId'] - >(initialAudioInputDeviceId); - const [selectedAudioOutputDeviceId, selectAudioOutputDeviceId] = useState< - MediaDevicesContextAPI['selectedAudioOutputDeviceId'] - >(initialAudioOutputDeviceId); - const [selectedVideoDeviceId, selectVideoDeviceId] = useState< - MediaDevicesContextAPI['selectedVideoDeviceId'] - >(initialVideoInputDeviceId); - - const [isAudioOutputChangeSupported] = useState(() => - checkIfAudioOutputChangeSupported(), - ); - const [initAudioEnabled, setInitialAudioEnabled] = useState( - !!initialAudioEnabled, - ); - const [initialVideoState, setInitialVideoState] = useState(() => - initialVideoEnabled ? DEVICE_STATE.starting : DEVICE_STATE.uninitialized, - ); - - const settings = useCallSettings(); - useEffect(() => { - if (!settings) return; - const { audio, video } = settings; - if (typeof initialAudioEnabled === 'undefined' && audio.mic_default_on) { - setInitialAudioEnabled(audio.mic_default_on); - } - if (typeof initialVideoEnabled === 'undefined' && video.camera_default_on) { - setInitialVideoState(DEVICE_STATE.starting); - } - }, [initialAudioEnabled, initialVideoEnabled, settings]); - - const publishVideoStream = useVideoPublisher({ - initialVideoMuted: !initialVideoState.enabled, - videoDeviceId: selectedVideoDeviceId, - }); - const publishAudioStream = useAudioPublisher({ - initialAudioMuted: !initAudioEnabled, - audioDeviceId: selectedAudioInputDeviceId, - }); - - const stopPublishingAudio = useCallback(async () => { - if ( - callingState === CallingState.IDLE || - callingState === CallingState.RINGING - ) { - setInitialAudioEnabled(false); - } else { - call?.stopPublish(SfuModels.TrackType.AUDIO); - } - }, [call, callingState]); - - const stopPublishingVideo = useCallback(async () => { - if ( - callingState === CallingState.IDLE || - callingState === CallingState.RINGING - ) { - setInitialVideoState(DEVICE_STATE.stopped); - } else { - call?.stopPublish(SfuModels.TrackType.VIDEO); - } - }, [call, callingState]); - - const toggleInitialAudioMuteState = useCallback( - () => setInitialAudioEnabled((prev) => !prev), - [], - ); - const toggleInitialVideoMuteState = useCallback( - () => - setInitialVideoState((prev) => { - const newType = DEVICE_STATE_TOGGLE[prev.type]; - return DEVICE_STATE[newType]; - }), - [], - ); - - const switchDevice = useCallback( - (kind: MediaDeviceKind, deviceId?: string) => { - if (kind === 'videoinput') { - selectVideoDeviceId(deviceId); - } - if (kind === 'audioinput') { - selectAudioInputDeviceId(deviceId); - } - if (kind === 'audiooutput') { - selectAudioOutputDeviceId(deviceId); - } - }, - [], - ); - 
-  useAudioInputDeviceFallback(
-    () => switchDevice('audioinput', DEFAULT_DEVICE_ID),
-    hasBrowserPermissionAudioInput,
-    selectedAudioInputDeviceId,
-  );
-  useAudioOutputDeviceFallback(
-    () => switchDevice('audiooutput', DEFAULT_DEVICE_ID),
-    // audiooutput devices can be enumerated only with microphone permissions
-    hasBrowserPermissionAudioInput,
-    selectedAudioOutputDeviceId,
-  );
-  useVideoDeviceFallback(
-    () => switchDevice('videoinput', DEFAULT_DEVICE_ID),
-    hasBrowserPermissionVideoInput,
-    selectedVideoDeviceId,
-  );
-
-  useEffect(() => {
-    if (!call || callingState !== CallingState.JOINED) return;
-    call.setAudioOutputDevice(selectedAudioOutputDeviceId);
-  }, [call, callingState, selectedAudioOutputDeviceId]);
-
-  useEffect(() => {
-    // audiooutput devices can be enumerated only with microphone permissions
-    if (!localParticipant$ || !hasBrowserPermissionAudioInput) return;
-
-    const subscription = watchForDisconnectedAudioOutputDevice(
-      localParticipant$.pipe(map((p) => p?.audioOutputDeviceId)),
-    ).subscribe(async () => {
-      selectAudioOutputDeviceId(DEFAULT_DEVICE_ID);
-    });
-    return () => {
-      subscription.unsubscribe();
-    };
-  }, [hasBrowserPermissionAudioInput, localParticipant$]);
-
-  const contextValue: MediaDevicesContextAPI = {
-    disposeOfMediaStream,
-    getAudioStream,
-    getVideoStream,
-    isAudioOutputChangeSupported,
-    selectedAudioInputDeviceId,
-    selectedAudioOutputDeviceId,
-    selectedVideoDeviceId,
-    switchDevice,
-    initialAudioEnabled: initAudioEnabled,
-    initialVideoState,
-    setInitialAudioEnabled,
-    setInitialVideoState,
-    toggleInitialAudioMuteState,
-    toggleInitialVideoMuteState,
-    publishAudioStream,
-    publishVideoStream,
-    stopPublishingAudio,
-    stopPublishingVideo,
-  };
-
-  return (
-
-      {children}
-
-  );
-};
-
-/**
- * Context consumer retrieving MediaDevicesContextAPI.
- * @returns
- *
- * @category Device Management
- */
-export const useMediaDevices = () => {
-  const value = useContext(MediaDevicesContext);
-  if (!value) {
-    console.warn(`Null MediaDevicesContext`);
-  }
-  return value as MediaDevicesContextAPI;
-};
diff --git a/packages/react-sdk/src/core/contexts/index.ts b/packages/react-sdk/src/core/contexts/index.ts
deleted file mode 100644
index 9b94c140a1..0000000000
--- a/packages/react-sdk/src/core/contexts/index.ts
+++ /dev/null
@@ -1 +0,0 @@
-export * from './MediaDevicesContext';
diff --git a/packages/react-sdk/src/core/hooks/index.ts b/packages/react-sdk/src/core/hooks/index.ts
index 93e2f23dbd..1a60fad836 100644
--- a/packages/react-sdk/src/core/hooks/index.ts
+++ b/packages/react-sdk/src/core/hooks/index.ts
@@ -1,4 +1,2 @@
-export * from './useAudioPublisher';
 export * from './useDevices';
-export * from './useVideoPublisher';
 export * from './useTrackElementVisibility';
diff --git a/packages/react-sdk/src/core/hooks/useAudioPublisher.ts b/packages/react-sdk/src/core/hooks/useAudioPublisher.ts
deleted file mode 100644
index cb45a1bbab..0000000000
--- a/packages/react-sdk/src/core/hooks/useAudioPublisher.ts
+++ /dev/null
@@ -1,146 +0,0 @@
-import { useCallback, useEffect, useRef } from 'react';
-import { map } from 'rxjs';
-import {
-  CallingState,
-  getAudioStream,
-  OwnCapability,
-  SfuModels,
-  watchForAddedDefaultAudioDevice,
-  watchForDisconnectedAudioDevice,
-} from '@stream-io/video-client';
-import { useCall, useCallStateHooks } from '@stream-io/video-react-bindings';
-import { useHasBrowserPermissions } from './useDevices';
-
-/**
- * @internal
- */
-export type AudioPublisherInit = {
-  initialAudioMuted?: boolean;
-  audioDeviceId?: string;
-};
-
-/**
- * @internal
- * @category Device Management
- */
-export const useAudioPublisher = ({
-  initialAudioMuted,
-  audioDeviceId,
-}: AudioPublisherInit) => {
-  const call = useCall();
-  const { useCallState, useCallCallingState, useLocalParticipant } =
-    useCallStateHooks();
-  const callState = useCallState();
-  const callingState = useCallCallingState();
-  const participant = useLocalParticipant();
-  const hasBrowserPermissionAudioInput = useHasBrowserPermissions(
-    'microphone' as PermissionName,
-  );
-  const { localParticipant$ } = callState;
-
-  const isPublishingAudio = participant?.publishedTracks.includes(
-    SfuModels.TrackType.AUDIO,
-  );
-
-  const publishAudioStream = useCallback(async () => {
-    if (!call) return;
-    if (!call.permissionsContext.hasPermission(OwnCapability.SEND_AUDIO)) {
-      throw new Error(`No permission to publish audio`);
-    }
-    try {
-      const audioStream = await getAudioStream({
-        deviceId: audioDeviceId,
-      });
-      await call.publishAudioStream(audioStream);
-    } catch (e) {
-      console.log('Failed to publish audio stream', e);
-    }
-  }, [audioDeviceId, call]);
-
-  const lastAudioDeviceId = useRef(audioDeviceId);
-  useEffect(() => {
-    if (
-      callingState === CallingState.JOINED &&
-      audioDeviceId !== lastAudioDeviceId.current
-    ) {
-      lastAudioDeviceId.current = audioDeviceId;
-      publishAudioStream().catch((e) => {
-        console.error('Failed to publish audio stream', e);
-      });
-    }
-  }, [audioDeviceId, callingState, publishAudioStream]);
-
-  const initialPublishRun = useRef(false);
-  useEffect(() => {
-    if (
-      callingState === CallingState.JOINED &&
-      !initialPublishRun.current &&
-      !initialAudioMuted
-    ) {
-      // automatic publishing should happen only when joining the call
-      // from the lobby, and the audio is not muted
-      publishAudioStream().catch((e) => {
-        console.error('Failed to publish audio stream', e);
-      });
-      initialPublishRun.current = true;
-    }
-  }, [callingState, initialAudioMuted, publishAudioStream]);
-
-  useEffect(() => {
-    if (!localParticipant$ || !hasBrowserPermissionAudioInput) return;
-    const subscription = watchForDisconnectedAudioDevice(
-      localParticipant$.pipe(map((p) => p?.audioDeviceId)),
-    ).subscribe(async () => {
-      if (!call) return;
-      call.setAudioDevice(undefined);
-      await call.stopPublish(SfuModels.TrackType.AUDIO);
-    });
-    return () => {
-      subscription.unsubscribe();
-    };
-  }, [hasBrowserPermissionAudioInput, localParticipant$, call]);
-
-  useEffect(() => {
-    if (!participant?.audioStream || !call || !isPublishingAudio) return;
-
-    const [track] = participant.audioStream.getAudioTracks();
-    const selectedAudioDeviceId = track.getSettings().deviceId;
-
-    const republishDefaultDevice = watchForAddedDefaultAudioDevice().subscribe(
-      async () => {
-        if (
-          !(
-            call &&
-            participant.audioStream &&
-            selectedAudioDeviceId === 'default'
-          )
-        )
-          return;
-        // We need to stop the original track first in order
-        // we can retrieve the new default device stream
-        track.stop();
-        const audioStream = await getAudioStream({
-          deviceId: 'default',
-        });
-        await call.publishAudioStream(audioStream);
-      },
-    );
-
-    const handleTrackEnded = async () => {
-      if (selectedAudioDeviceId === audioDeviceId) {
-        const audioStream = await getAudioStream({
-          deviceId: audioDeviceId,
-        });
-        await call.publishAudioStream(audioStream);
-      }
-    };
-
-    track.addEventListener('ended', handleTrackEnded);
-    return () => {
-      track.removeEventListener('ended', handleTrackEnded);
-      republishDefaultDevice.unsubscribe();
-    };
-  }, [audioDeviceId, call, participant?.audioStream, isPublishingAudio]);
-
-  return publishAudioStream;
-};
diff --git a/packages/react-sdk/src/core/hooks/useDevices.ts b/packages/react-sdk/src/core/hooks/useDevices.ts
index b76badb074..21a22d921e 100644
--- a/packages/react-sdk/src/core/hooks/useDevices.ts
+++ b/packages/react-sdk/src/core/hooks/useDevices.ts
@@ -1,10 +1,4 @@
 import { ChangeEvent, useEffect, useState } from 'react';
-import { Observable, pairwise } from 'rxjs';
-import {
-  getAudioDevices,
-  getAudioOutputDevices,
-  getVideoDevices,
-} from '@stream-io/video-client';
 
 export const useHasBrowserPermissions = (permissionName: PermissionName) => {
   const [canSubscribe, enableSubscription] = useState(false);
@@ -37,192 +31,3 @@ export const useHasBrowserPermissions = (permissionName: PermissionName) => {
 
   return canSubscribe;
 };
-
-/**
- * Observes changes in connected devices and maintains an up-to-date array of connected MediaDeviceInfo objects.
- * @param observeDevices
- * @category Device Management
- */
-export const useDevices = (
-  observeDevices: () => Observable,
-) => {
-  const [devices, setDevices] = useState([]);
-
-  useEffect(() => {
-    const subscription = observeDevices().subscribe(setDevices);
-
-    return () => {
-      subscription.unsubscribe();
-    };
-  }, [observeDevices]);
-
-  return devices;
-};
-
-/**
- * Observes changes and maintains an array of connected video input devices
- * @category Device Management
- */
-export const useVideoDevices = () => useDevices(getVideoDevices);
-
-/**
- * Observes changes and maintains an array of connected audio input devices
- * @category Device Management
- */
-export const useAudioInputDevices = () => useDevices(getAudioDevices);
-
-/**
- * Observes changes and maintains an array of connected audio output devices
- * @category Device Management
- */
-export const useAudioOutputDevices = () => useDevices(getAudioOutputDevices);
-
-/**
- * Verifies that newly selected device id exists among the registered devices.
- * If the selected device id is not found among existing devices, switches to the default device.
- * The media devices are observed only if a given permission ('camera' resp. 'microphone') is granted in browser.
- * Regardless of current permissions settings, an intent to observe devices will take place in Firefox.
- * This is due to the fact that Firefox does not allow to query for 'camera' and 'microphone' permissions.
- * @param canObserve
- * @param devices$
- * @param switchToDefaultDevice
- * @param selectedDeviceId
- * @category Device Management
- */
-export const useDeviceFallback = (
-  canObserve: boolean,
-  devices$: Observable,
-  switchToDefaultDevice: () => void,
-  selectedDeviceId?: string,
-) => {
-  useEffect(() => {
-    if (!canObserve) return;
-    const validateDeviceId = devices$.pipe().subscribe((devices) => {
-      const deviceFound = devices.find(
-        (device) => device.deviceId === selectedDeviceId,
-      );
-      if (!deviceFound) switchToDefaultDevice();
-    });
-
-    return () => {
-      validateDeviceId.unsubscribe();
-    };
-  }, [canObserve, devices$, selectedDeviceId, switchToDefaultDevice]);
-};
-
-/**
- * Verifies that newly selected video device id exists among the registered devices.
- * If the selected device id is not found among existing devices, switches to the default video device.
- * The media devices are observed only if 'camera' permission is granted in browser.
- * It is integrators responsibility to instruct users how to enable required permissions.
- * Regardless of current permissions settings, an intent to observe devices will take place in Firefox.
- * This is due to the fact that Firefox does not allow to query for 'camera' and 'microphone' permissions.
- * @param switchToDefaultDevice
- * @param canObserve
- * @param selectedDeviceId
- * @category Device Management
- */
-export const useVideoDeviceFallback = (
-  switchToDefaultDevice: () => void,
-  canObserve: boolean,
-  selectedDeviceId?: string,
-) =>
-  useDeviceFallback(
-    canObserve,
-    getVideoDevices(),
-    switchToDefaultDevice,
-    selectedDeviceId,
-  );
-
-/**
- * Verifies that newly selected audio input device id exists among the registered devices.
- * If the selected device id is not found among existing devices, switches to the default audio input device.
- * The media devices are observed only if 'microphone' permission is granted in browser.
- * It is integrators responsibility to instruct users how to enable required permissions.
- * Regardless of current permissions settings, an intent to observe devices will take place in Firefox.
- * This is due to the fact that Firefox does not allow to query for 'camera' and 'microphone' permissions.
- * @param switchToDefaultDevice
- * @param canObserve
- * @param selectedDeviceId
- * @category Device Management
- */
-export const useAudioInputDeviceFallback = (
-  switchToDefaultDevice: () => void,
-  canObserve: boolean,
-  selectedDeviceId?: string,
-) =>
-  useDeviceFallback(
-    canObserve,
-    getAudioDevices(),
-    switchToDefaultDevice,
-    selectedDeviceId,
-  );
-
-/**
- * Verifies that newly selected audio output device id exists among the registered devices.
- * If the selected device id is not found among existing devices, switches to the default audio output device.
- * The media devices are observed only if 'microphone' permission is granted in browser.
- * It is integrators responsibility to instruct users how to enable required permissions.
- * Regardless of current permissions settings, an intent to observe devices will take place in Firefox.
- * This is due to the fact that Firefox does not allow to query for 'camera' and 'microphone' permissions.
- * @param switchToDefaultDevice
- * @param canObserve
- * @param selectedDeviceId
- * @category Device Management
- */
-export const useAudioOutputDeviceFallback = (
-  switchToDefaultDevice: () => void,
-  canObserve: boolean,
-  selectedDeviceId?: string,
-) =>
-  useDeviceFallback(
-    canObserve,
-    getAudioOutputDevices(),
-    switchToDefaultDevice,
-    selectedDeviceId,
-  );
-
-/**
- * Observes devices of certain kind are made unavailable and executes onDisconnect callback.
- * @param observeDevices
- * @param onDisconnect
- * @category Device Management
- */
-export const useOnUnavailableDevices = (
-  observeDevices: Observable,
-  onDisconnect: () => void,
-) => {
-  useEffect(() => {
-    const subscription = observeDevices
-      .pipe(pairwise())
-      .subscribe(([prev, current]) => {
-        if (prev.length > 0 && current.length === 0) onDisconnect();
-      });
-
-    return () => subscription.unsubscribe();
-  }, [observeDevices, onDisconnect]);
-};
-
-/**
- * Observes disconnect of all video devices and executes onDisconnect callback.
- * @param onDisconnect
- * @category Device Management
- */
-export const useOnUnavailableVideoDevices = (onDisconnect: () => void) =>
-  useOnUnavailableDevices(getVideoDevices(), onDisconnect);
-
-/**
- * Observes disconnect of all audio input devices and executes onDisconnect callback.
- * @param onDisconnect
- * @category Device Management
- */
-export const useOnUnavailableAudioInputDevices = (onDisconnect: () => void) =>
-  useOnUnavailableDevices(getAudioDevices(), onDisconnect);
-
-/**
- * Observes disconnect of all audio output devices and executes onDisconnect callback.
- * @param onDisconnect
- * @category Device Management
- */
-export const useOnUnavailableAudioOutputDevices = (onDisconnect: () => void) =>
-  useOnUnavailableDevices(getAudioOutputDevices(), onDisconnect);
diff --git a/packages/react-sdk/src/core/hooks/useVideoPublisher.ts b/packages/react-sdk/src/core/hooks/useVideoPublisher.ts
deleted file mode 100644
index cfe1bfc7bb..0000000000
--- a/packages/react-sdk/src/core/hooks/useVideoPublisher.ts
+++ /dev/null
@@ -1,177 +0,0 @@
-import { useCallback, useEffect, useRef } from 'react';
-import { map } from 'rxjs/operators';
-import {
-  CallingState,
-  getVideoStream,
-  OwnCapability,
-  SfuModels,
-  VideoSettingsCameraFacingEnum,
-  watchForAddedDefaultVideoDevice,
-  watchForDisconnectedVideoDevice,
-} from '@stream-io/video-client';
-import { useCall, useCallStateHooks } from '@stream-io/video-react-bindings';
-import { useDebugPreferredVideoCodec } from '../../components/Debug/useIsDebugMode';
-import { useHasBrowserPermissions } from './useDevices';
-
-/**
- * @internal
- */
-export type VideoPublisherInit = {
-  initialVideoMuted?: boolean;
-  videoDeviceId?: string;
-};
-
-/**
- * @internal
- * @category Device Management
- */
-export const useVideoPublisher = ({
-  initialVideoMuted,
-  videoDeviceId,
-}: VideoPublisherInit) => {
-  const call = useCall();
-  const {
-    useCallState,
-    useCallCallingState,
-    useLocalParticipant,
-    useCallSettings,
-  } = useCallStateHooks();
-  const callState = useCallState();
-  const callingState = useCallCallingState();
-  const participant = useLocalParticipant();
-  const hasBrowserPermissionVideoInput = useHasBrowserPermissions(
-    'camera' as PermissionName,
-  );
-  const { localParticipant$ } = callState;
-
-  const preferredCodec = useDebugPreferredVideoCodec();
-  const isPublishingVideo = participant?.publishedTracks.includes(
-    SfuModels.TrackType.VIDEO,
-  );
-
-  const settings = useCallSettings();
-  const videoSettings = settings?.video;
-  const targetResolution = videoSettings?.target_resolution;
-  const publishVideoStream = useCallback(async () => {
-    if (!call) return;
-    if (!call.permissionsContext.hasPermission(OwnCapability.SEND_VIDEO)) {
-      throw new Error(`No permission to publish video`);
-    }
-    try {
-      const videoStream = await getVideoStream({
-        deviceId: videoDeviceId,
-        width: targetResolution?.width,
-        height: targetResolution?.height,
-        facingMode: toFacingMode(videoSettings?.camera_facing),
-      });
-      await call.publishVideoStream(videoStream, { preferredCodec });
-    } catch (e) {
-      console.log('Failed to publish video stream', e);
-    }
-  }, [
-    call,
-    preferredCodec,
-    targetResolution?.height,
-    targetResolution?.width,
-    videoDeviceId,
-    videoSettings?.camera_facing,
-  ]);
-
-  const lastVideoDeviceId = useRef(videoDeviceId);
-  useEffect(() => {
-    if (
-      callingState === CallingState.JOINED &&
-      videoDeviceId !== lastVideoDeviceId.current
-    ) {
-      lastVideoDeviceId.current = videoDeviceId;
-      publishVideoStream().catch((e) => {
-        console.error('Failed to publish video stream', e);
-      });
-    }
-  }, [publishVideoStream, videoDeviceId, callingState]);
-
-  const initialPublishRun = useRef(false);
-  useEffect(() => {
-    if (
-      callingState === CallingState.JOINED &&
-      !initialPublishRun.current &&
-      !initialVideoMuted
-    ) {
-      // automatic publishing should happen only when joining the call
-      // from the lobby, and the video is not muted
-      publishVideoStream().catch((e) => {
-        console.error('Failed to publish video stream', e);
-      });
-      initialPublishRun.current = true;
-    }
-  }, [callingState, initialVideoMuted, publishVideoStream]);
-
-  useEffect(() => {
-    if (!localParticipant$ || !hasBrowserPermissionVideoInput) return;
-    const subscription = watchForDisconnectedVideoDevice(
-      localParticipant$.pipe(map((p) => p?.videoDeviceId)),
-    ).subscribe(async () => {
-      if (!call) return;
-      call.setVideoDevice(undefined);
-      await call.stopPublish(SfuModels.TrackType.VIDEO);
-    });
-    return () => {
-      subscription.unsubscribe();
-    };
-  }, [hasBrowserPermissionVideoInput, localParticipant$, call]);
-
-  useEffect(() => {
-    if (!participant?.videoStream || !call || !isPublishingVideo) return;
-
-    const [track] = participant.videoStream.getVideoTracks();
-    const selectedVideoDeviceId = track.getSettings().deviceId;
-
-    const republishDefaultDevice = watchForAddedDefaultVideoDevice().subscribe(
-      async () => {
-        if (
-          !(
-            call &&
-            participant.videoStream &&
-            selectedVideoDeviceId === 'default'
-          )
-        )
-          return;
-        // We need to stop the original track first in order
-        // we can retrieve the new default device stream
-        track.stop();
-        const videoStream = await getVideoStream({
-          deviceId: 'default',
-        });
-        await call.publishVideoStream(videoStream);
-      },
-    );
-
-    const handleTrackEnded = async () => {
-      if (selectedVideoDeviceId === videoDeviceId) {
-        const videoStream = await getVideoStream({
-          deviceId: videoDeviceId,
-        });
-        await call.publishVideoStream(videoStream);
-      }
-    };
-
-    track.addEventListener('ended', handleTrackEnded);
-    return () => {
-      track.removeEventListener('ended', handleTrackEnded);
-      republishDefaultDevice.unsubscribe();
-    };
-  }, [videoDeviceId, call, participant?.videoStream, isPublishingVideo]);
-
-  return publishVideoStream;
-};
-
-const toFacingMode = (value: VideoSettingsCameraFacingEnum | undefined) => {
-  switch (value) {
-    case VideoSettingsCameraFacingEnum.FRONT:
-      return 'user';
-    case VideoSettingsCameraFacingEnum.BACK:
-      return 'environment';
-    default:
-      return undefined;
-  }
-};
diff --git a/packages/react-sdk/src/core/index.ts b/packages/react-sdk/src/core/index.ts
index 615c788fb3..f76fd6f166 100644
--- a/packages/react-sdk/src/core/index.ts
+++ b/packages/react-sdk/src/core/index.ts
@@ -1,3 +1,2 @@
 export * from './components';
-export * from './contexts';
 export * from './hooks';
diff --git a/packages/react-sdk/src/hooks/index.ts b/packages/react-sdk/src/hooks/index.ts
index fef3e2c496..e1b5f2cc0c 100644
--- a/packages/react-sdk/src/hooks/index.ts
+++ b/packages/react-sdk/src/hooks/index.ts
@@ -1,7 +1,4 @@
 export * from './useFloatingUIPreset';
 export * from './useScrollPosition';
-export * from './useToggleAudioMuteState';
-export * from './useToggleVideoMuteState';
-export * from './useToggleScreenShare';
 export * from './useToggleCallRecording';
 export * from './useRequestPermission';
diff --git a/packages/react-sdk/src/hooks/useToggleAudioMuteState.ts b/packages/react-sdk/src/hooks/useToggleAudioMuteState.ts
deleted file mode 100644
index b094f72ef0..0000000000
--- a/packages/react-sdk/src/hooks/useToggleAudioMuteState.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import { useCallback, useRef } from 'react';
-import { useCallStateHooks } from '@stream-io/video-react-bindings';
-import { OwnCapability, SfuModels } from '@stream-io/video-client';
-
-import { useMediaDevices } from '../core';
-import { useRequestPermission } from './useRequestPermission';
-
-export const useToggleAudioMuteState = () => {
-  const { publishAudioStream, stopPublishingAudio } = useMediaDevices();
-  const { useLocalParticipant } = useCallStateHooks();
-  const localParticipant = useLocalParticipant();
-
-  const { isAwaitingPermission, requestPermission } = useRequestPermission(
-    OwnCapability.SEND_AUDIO,
-  );
-
-  // to keep the toggle function as stable as possible
-  const isAudioMutedReference = useRef(false);
-
-  isAudioMutedReference.current = !localParticipant?.publishedTracks.includes(
-    SfuModels.TrackType.AUDIO,
-  );
-
-  const toggleAudioMuteState = useCallback(async () => {
-    if (isAudioMutedReference.current) {
-      const canPublish = await requestPermission();
-      if (canPublish) return publishAudioStream();
-    }
-
-    if (!isAudioMutedReference.current) await stopPublishingAudio();
-  }, [publishAudioStream, requestPermission, stopPublishingAudio]);
-
-  return { toggleAudioMuteState, isAwaitingPermission };
-};
diff --git a/packages/react-sdk/src/hooks/useToggleScreenShare.ts b/packages/react-sdk/src/hooks/useToggleScreenShare.ts
deleted file mode 100644
index 92ba4638bd..0000000000
--- a/packages/react-sdk/src/hooks/useToggleScreenShare.ts
+++ /dev/null
@@ -1,43 +0,0 @@
-import { useCallback, useRef } from 'react';
-import { useCall, useCallStateHooks } from '@stream-io/video-react-bindings';
-import {
-  getScreenShareStream,
-  OwnCapability,
-  SfuModels,
-} from '@stream-io/video-client';
-import { useRequestPermission } from './useRequestPermission';
-
-export const useToggleScreenShare = () => {
-  const { useLocalParticipant } = useCallStateHooks();
-  const localParticipant = useLocalParticipant();
-  const call = useCall();
-  const isScreenSharingReference = useRef(false);
-  const { isAwaitingPermission, requestPermission } = useRequestPermission(
-    OwnCapability.SCREENSHARE,
-  );
-
-  const isScreenSharing = !!localParticipant?.publishedTracks.includes(
-    SfuModels.TrackType.SCREEN_SHARE,
-  );
-
-  isScreenSharingReference.current = isScreenSharing;
-
-  const toggleScreenShare = useCallback(async () => {
-    if (!isScreenSharingReference.current) {
-      const canPublish = await requestPermission();
-      if (!canPublish) return;
-
-      const stream = await getScreenShareStream().catch((e) => {
-        console.log(`Can't share screen: ${e}`);
-      });
-
-      if (stream) {
-        return call?.publishScreenShareStream(stream);
-      }
-    }
-
-    await call?.stopPublish(SfuModels.TrackType.SCREEN_SHARE);
-  }, [call, requestPermission]);
-
-  return { toggleScreenShare, isAwaitingPermission, isScreenSharing };
-};
diff --git a/packages/react-sdk/src/hooks/useToggleVideoMuteState.ts b/packages/react-sdk/src/hooks/useToggleVideoMuteState.ts
deleted file mode 100644
index ea574fd06f..0000000000
--- a/packages/react-sdk/src/hooks/useToggleVideoMuteState.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import { useCallback, useRef } from 'react';
-import { useCallStateHooks } from '@stream-io/video-react-bindings';
-import { OwnCapability, SfuModels } from '@stream-io/video-client';
-
-import { useMediaDevices } from '../core';
-import { useRequestPermission } from './useRequestPermission';
-
-export const useToggleVideoMuteState = () => {
-  const { publishVideoStream, stopPublishingVideo } = useMediaDevices();
-  const { useLocalParticipant } = useCallStateHooks();
-  const localParticipant = useLocalParticipant();
-
-  const { isAwaitingPermission, requestPermission } = useRequestPermission(
-    OwnCapability.SEND_VIDEO,
-  );
-
-  // to keep the toggle function as stable as possible
-  const isVideoMutedReference = useRef(false);
-
-  isVideoMutedReference.current = !localParticipant?.publishedTracks.includes(
-    SfuModels.TrackType.VIDEO,
-  );
-
-  const toggleVideoMuteState = useCallback(async () => {
-    if (isVideoMutedReference.current) {
-      const canPublish = await requestPermission();
-      if (canPublish) return publishVideoStream();
-    }
-
-    if (!isVideoMutedReference.current) await stopPublishingVideo();
-  }, [publishVideoStream, requestPermission, stopPublishingVideo]);
-
-  return { toggleVideoMuteState, isAwaitingPermission };
-};
diff --git a/packages/react-sdk/src/translations/en.json b/packages/react-sdk/src/translations/en.json
index 3fc47a7937..d3c56a5f3b 100644
--- a/packages/react-sdk/src/translations/en.json
+++ b/packages/react-sdk/src/translations/en.json
@@ -37,6 +37,9 @@
   "Me": "Me",
   "Unknown": "Unknown",
 
+  "You are presenting your screen": "You are presenting your screen",
+  "Stop Screen Sharing": "Stop Screen Sharing",
+
   "Allow": "Allow",
   "Revoke": "Revoke",
   "Dismiss": "Dismiss",
diff --git a/sample-apps/react/react-dogfood/components/AudioVolumeIndicator.tsx b/sample-apps/react/react-dogfood/components/AudioVolumeIndicator.tsx
index 6f09e551a4..c686dfc80f 100644
--- a/sample-apps/react/react-dogfood/components/AudioVolumeIndicator.tsx
+++ b/sample-apps/react/react-dogfood/components/AudioVolumeIndicator.tsx
@@ -3,41 +3,29 @@ import { useEffect, useState } from 'react';
 import {
   createSoundDetector,
   Icon,
-  useMediaDevices,
+  useCallStateHooks,
 } from '@stream-io/video-react-sdk';
 
 export const AudioVolumeIndicator = () => {
-  const { getAudioStream, selectedAudioInputDeviceId, initialAudioEnabled } =
-    useMediaDevices();
+  const { useMicrophoneState } = useCallStateHooks();
+  const { isEnabled, mediaStream } = useMicrophoneState();
   const [audioLevel, setAudioLevel] = useState(0);
 
   useEffect(() => {
-    if (!initialAudioEnabled) return;
+    if (!isEnabled || !mediaStream) return;
 
-    const disposeSoundDetector = getAudioStream({
-      deviceId: selectedAudioInputDeviceId,
-    }).then((audioStream) =>
-      createSoundDetector(
-        audioStream,
-        ({ audioLevel: al }) => setAudioLevel(al),
-        { detectionFrequencyInMs: 80 },
-      ),
+    const disposeSoundDetector = createSoundDetector(
+      mediaStream,
+      ({ audioLevel: al }) => setAudioLevel(al),
+      { detectionFrequencyInMs: 80 },
     );
-    disposeSoundDetector.catch((err) => {
-      console.error('Error while creating sound detector', err);
-    });
 
     return () => {
-      disposeSoundDetector
-        .then((dispose) => dispose())
-        .catch((err) => {
-          console.error('Error while disposing sound detector', err);
-        });
+      disposeSoundDetector().catch(console.error);
     };
-  }, [initialAudioEnabled, getAudioStream, selectedAudioInputDeviceId]);
+  }, [isEnabled, mediaStream]);
 
-  if (!initialAudioEnabled) return null;
+  if (!isEnabled) return null;
 
   return (
& {
+export type LocalDeviceSettings = {
+  selectedVideoDeviceId: string;
+  selectedAudioInputDeviceId: string;
+  selectedAudioOutputDeviceId: string;
   isAudioMute: boolean;
   isVideoMute: boolean;
 };
@@ -31,13 +24,15 @@ export const getDeviceSettings = () => {
 };
 
 export const DeviceSettingsCaptor = () => {
+  // FIXME OL: rework this
+
   const {
-    selectedAudioOutputDeviceId,
-    selectedAudioInputDeviceId,
-    selectedVideoDeviceId,
-    initialAudioEnabled,
-    initialVideoState,
-  } = useMediaDevices();
+    selectedAudioOutputDeviceId = 'default',
+    selectedAudioInputDeviceId = 'default',
+    selectedVideoDeviceId = 'default',
+    initialAudioEnabled = true,
+    initialVideoState = { enabled: true },
+  } = {};
 
   let isAudioMute = !initialAudioEnabled;
   let isVideoMute = !initialVideoState.enabled;
diff --git a/sample-apps/react/react-dogfood/hooks/useKeyboardShortcuts.ts b/sample-apps/react/react-dogfood/hooks/useKeyboardShortcuts.ts
index 09c6f60b6d..26f9245ab2 100644
--- a/sample-apps/react/react-dogfood/hooks/useKeyboardShortcuts.ts
+++ b/sample-apps/react/react-dogfood/hooks/useKeyboardShortcuts.ts
@@ -1,9 +1,7 @@
 import {
   defaultReactions,
   useCall,
-  useMediaDevices,
-  useToggleAudioMuteState,
-  useToggleVideoMuteState,
+  useCallStateHooks,
 } from '@stream-io/video-react-sdk';
 import { useEffect, useRef, useState } from 'react';
 
@@ -26,11 +24,12 @@ const isMacOS = () => {
 const [, raiseHandReaction] = defaultReactions;
 
 export const usePushToTalk = (key: string) => {
-  const { publishAudioStream, stopPublishingAudio } = useMediaDevices();
-
   const [isTalking, setIsTalking] = useState(false);
   const interactedRef = useRef(false);
 
+  const { useMicrophoneState } = useCallStateHooks();
+  const { microphone } = useMicrophoneState();
+
   useEffect(() => {
     hotkeys(key, { keyup: true }, (e) => {
       if (e.metaKey || e.ctrlKey) return;
@@ -49,18 +48,19 @@ export const usePushToTalk = (key: string) => {
   }, [key]);
 
   useEffect(() => {
-    if (isTalking) publishAudioStream().catch(console.error);
+    if (isTalking) microphone.enable().catch(console.error);
 
     return () => {
-      if (interactedRef.current) stopPublishingAudio();
+      if (interactedRef.current) microphone.disable().catch(console.error);
     };
-  }, [isTalking, publishAudioStream, stopPublishingAudio]);
+  }, [isTalking, microphone]);
 };
 
 export const useKeyboardShortcuts = () => {
-  const { toggleAudioMuteState } = useToggleAudioMuteState();
-  const { toggleVideoMuteState } = useToggleVideoMuteState();
   const call = useCall();
+  const { useCameraState, useMicrophoneState } = useCallStateHooks();
+  const { microphone } = useMicrophoneState();
+  const { camera } = useCameraState();
   usePushToTalk(KeyboardShortcut.PUSH_TO_TALK);
 
   useEffect(() => {
@@ -73,13 +73,13 @@ export const useKeyboardShortcuts = () => {
       if (isMac && !KeyboardShortcut.TOGGLE_AUDIO_MAC.includes(ke.shortcut))
         return;
 
-      toggleAudioMuteState().catch(console.error);
+      microphone.toggle();
     });
 
     return () => {
      hotkeys.unbind(key);
    };
-  }, [toggleAudioMuteState]);
+  }, [microphone]);
 
   useEffect(() => {
     const key = `${KeyboardShortcut.TOGGLE_VIDEO_MAC},${KeyboardShortcut.TOGGLE_VIDEO_OTHER}`;
@@ -91,13 +91,13 @@ export const useKeyboardShortcuts = () => {
       if (isMac && !KeyboardShortcut.TOGGLE_VIDEO_MAC.includes(ke.shortcut))
         return;
 
-      toggleVideoMuteState().catch(console.error);
+      camera.toggle();
    });
 
     return () => {
      hotkeys.unbind(key);
    };
-  }, [toggleVideoMuteState]);
+  }, [camera]);
 
   useEffect(() => {
     const key = `${KeyboardShortcut.RAISE_HAND_MAC},${KeyboardShortcut.RAISE_HAND_OTHER}`;