Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

VIDCS-2289: Stereo #314

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
218 changes: 116 additions & 102 deletions Custom-Audio-Driver/External-Audio-Device/OTDefaultAudioDevice.m
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ static OSStatus playout_cb(void *ref_con,
UInt32 bus_num,
UInt32 num_frames,
AudioBufferList *data);
static NSInteger channelState;

@interface OTDefaultAudioDevice ()
- (BOOL) setupAudioUnit:(AudioUnit *)voice_unit playout:(BOOL)isPlayout;
Expand Down Expand Up @@ -93,6 +94,7 @@ @implementation OTDefaultAudioDevice

/* synchronize all access to the audio subsystem */
dispatch_queue_t _safetyQueue;
NSTimer *channelTimer;

@public
id _audioBus;
Expand All @@ -117,7 +119,7 @@ - (instancetype)init
if (self) {
_audioFormat = [[OTAudioFormat alloc] init];
_audioFormat.sampleRate = kSampleRate;
_audioFormat.numChannels = 1;
_audioFormat.numChannels = 2;
_safetyQueue = dispatch_queue_create("ot-audio-driver",
DISPATCH_QUEUE_SERIAL);
_restartRetryCount = 0;
Expand All @@ -131,11 +133,23 @@ - (BOOL)setAudioBus:(id<OTAudioBus>)audioBus
_audioBus = audioBus;
_audioFormat = [[OTAudioFormat alloc] init];
_audioFormat.sampleRate = kSampleRate;
_audioFormat.numChannels = 1;
_audioFormat.numChannels = 2;

channelState = 1; // Start with state 1
channelTimer = [NSTimer scheduledTimerWithTimeInterval:5.0
target:self
selector:@selector(updateChannelState)
userInfo:nil
repeats:YES];

return YES;
}

// Timer callback: advances the file-static channel-routing state through
// the repeating cycle 1 → 2 → … → 6 → 1. Scheduled every 5 s by
// channelTimer (set up in -setAudioBus:).
- (void)updateChannelState {
    // Wrap 6 back to 0, then shift into the 1-based range [1, 6].
    NSInteger wrapped = channelState % 6;
    channelState = wrapped + 1;
    NSLog(@"Channel State Updated to: %ld", (long)channelState);
}

- (void)dealloc
{
[self removeObservers];
Expand Down Expand Up @@ -408,7 +422,7 @@ - (void) teardownAudio
[mySession setMode:avAudioSessionMode error:nil];
[mySession setPreferredSampleRate: avAudioSessionPreffSampleRate
error: nil];
[mySession setPreferredInputNumberOfChannels:avAudioSessionChannels
[mySession setPreferredInputNumberOfChannels:2
error:nil];

AVAudioSessionSetActiveOptions audioOptions = AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation;
Expand Down Expand Up @@ -440,10 +454,10 @@ - (void) setupAudioSession
_previousAVAudioSessionCategory = mySession.category;
avAudioSessionMode = mySession.mode;
avAudioSessionPreffSampleRate = mySession.preferredSampleRate;
avAudioSessionChannels = mySession.inputNumberOfChannels;
avAudioSessionChannels = 2;

[mySession setPreferredSampleRate: kSampleRate error: nil];
[mySession setPreferredInputNumberOfChannels:1 error:nil];
[mySession setPreferredInputNumberOfChannels:2 error:nil];
[mySession setPreferredIOBufferDuration:kPreferredIOBufferDuration
error:nil];

Expand Down Expand Up @@ -794,74 +808,86 @@ static void update_recording_delay(OTDefaultAudioDevice* device) {
device->_recordingDelay = device->_recordingDelayHWAndOS;
}

// Frequency (Hz) of the sine test tone injected into the capture path.
#define TONE_FREQUENCY 440
// Tau = 2π, one full period in radians. Parenthesized so the macro
// expands safely inside larger expressions (e.g. `x / M_TAU`); the
// unparenthesized form `2.0 * M_PI` would misbind there.
#define M_TAU (2.0 * M_PI)

static OSStatus recording_cb(void *ref_con,
AudioUnitRenderActionFlags *action_flags,
const AudioTimeStamp *time_stamp,
UInt32 bus_num,
UInt32 num_frames,
AudioBufferList *data)
UInt32 inNumberFrames,
AudioBufferList *ioData)
{

OTDefaultAudioDevice *dev = (__bridge OTDefaultAudioDevice*) ref_con;

if (!dev->buffer_list || num_frames > dev->buffer_num_frames)
{
if (dev->buffer_list) {
free(dev->buffer_list->mBuffers[0].mData);
free(dev->buffer_list);
}

dev->buffer_list =
(AudioBufferList*)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));
dev->buffer_list->mNumberBuffers = 1;
dev->buffer_list->mBuffers[0].mNumberChannels = 1;

dev->buffer_list->mBuffers[0].mDataByteSize = num_frames*sizeof(UInt16);
dev->buffer_list->mBuffers[0].mData = malloc(num_frames*sizeof(UInt16));

dev->buffer_num_frames = num_frames;
dev->buffer_size = dev->buffer_list->mBuffers[0].mDataByteSize;
}

OSStatus status;
status = AudioUnitRender(dev->recording_voice_unit,
action_flags,
time_stamp,
1,
num_frames,
dev->buffer_list);

if (status != noErr) {
CheckError(status, @"AudioUnitRender");
}



if (dev->recording) {
static float theta;

// Assuming ioData has only one buffer for interleaved data
SInt16 *buffer = (SInt16 *)ioData->mBuffers[0].mData;
for (UInt32 frame = 0; frame < inNumberFrames; ++frame) {
if (channelState == 1) {
// Write left channel
buffer[2 * frame] = (SInt16)(sin(theta) * 32767.0f);
// Write right channel (silenced in your original example)
buffer[2 * frame + 1] = 0;
} else if (channelState == 2) {
// Write left channel
buffer[2 * frame] = 0;
// Write right channel (silenced in your original example)
buffer[2 * frame + 1] = 0; //0;
} else if (channelState == 3) {
// Write left channel
buffer[2 * frame] = (SInt16)(sin(theta) * 32767.0f);;
// Write right channel (state 3 plays the tone on both channels)
buffer[2 * frame + 1] = (SInt16)(sin(theta) * 32767.0f);
}else if (channelState == 4) {
// Write left channel
buffer[2 * frame] = 0;
// Write right channel (silenced in your original example)
buffer[2 * frame + 1] = 0;
} else if (channelState == 5) {
// Write left channel
buffer[2 * frame] = 0;
// Write right channel (state 5 plays the tone on the right channel only)
buffer[2 * frame + 1] = (SInt16)(sin(theta) * 32767.0f);
} else if (channelState == 6) {
// Write left channel
buffer[2 * frame] = 0;
// Write right channel (silenced in your original example)
buffer[2 * frame + 1] = 0;
}


// Increment theta for the tone frequency
theta += M_TAU * TONE_FREQUENCY / kSampleRate;
if (theta > M_TAU) {
theta -= M_TAU;
}
}

// Some sample code to generate a sine wave instead of use the mic
// static double startingFrameCount = 0;
// double j = startingFrameCount;
// double cycleLength = kSampleRate / 880.0;
// int frame = 0;
// for (frame = 0; frame < num_frames; ++frame)
// {
// int16_t* data = (int16_t*)dev->buffer_list->mBuffers[0].mData;
// Float32 sample = (Float32)sin (2 * M_PI * (j / cycleLength));
// (data)[frame] = (sample * 32767.0f);
// j += 1.0;
// if (j > cycleLength)
// j -= cycleLength;
// }
// startingFrameCount = j;
[dev->_audioBus writeCaptureData:dev->buffer_list->mBuffers[0].mData
numberOfSamples:num_frames];

// Write the captured data to the audio bus
[dev->_audioBus writeCaptureData:buffer
numberOfSamples:inNumberFrames]; // NOTE(review): frame count is passed unmodified, yet each frame now holds two interleaved samples (L/R) — confirm whether the bus expects frames or total samples
// Access the interleaved buffer
SInt16 * interleavedBuffer = (SInt16 *)ioData->mBuffers[0].mData;

// Set all samples to zero for both left and right channels
for (UInt32 frame = 0; frame < inNumberFrames; ++frame) {
interleavedBuffer[2 * frame] = 0; // Left channel
interleavedBuffer[2 * frame + 1] = 0; // Right channel
}

}
// some ocassions, AudioUnitRender only renders part of the buffer and then next
// call to the AudioUnitRender fails with smaller buffer.
if (dev->buffer_size != dev->buffer_list->mBuffers[0].mDataByteSize)
dev->buffer_list->mBuffers[0].mDataByteSize = dev->buffer_size;

// // Ensure the buffer size remains constant
// if (dev->buffer_size != ioData->mBuffers[0].mDataByteSize)
// ioData->mBuffers[0].mDataByteSize = dev->buffer_size;

update_recording_delay(dev);

return noErr;
}

Expand Down Expand Up @@ -1028,21 +1054,20 @@ - (BOOL)setupAudioUnit:(AudioUnit *)voice_unit playout:(BOOL)isPlayout;
}

UInt32 bytesPerSample = sizeof(SInt16);
stream_format.mFormatID = kAudioFormatLinearPCM;
stream_format.mFormatFlags =
kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
stream_format.mBytesPerPacket = bytesPerSample;
stream_format.mFormatID = kAudioFormatLinearPCM;
stream_format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; // Ensure no non-interleaved flag is set
stream_format.mSampleRate = kSampleRate;
stream_format.mChannelsPerFrame = 2;
stream_format.mBitsPerChannel = 16;
stream_format.mBytesPerFrame = (stream_format.mBitsPerChannel / 8) * stream_format.mChannelsPerFrame;
stream_format.mFramesPerPacket = 1;
stream_format.mBytesPerFrame = bytesPerSample;
stream_format.mChannelsPerFrame= 1;
stream_format.mBitsPerChannel = 8 * bytesPerSample;
stream_format.mSampleRate = (Float64) kSampleRate;
stream_format.mBytesPerPacket = stream_format.mBytesPerFrame * stream_format.mFramesPerPacket;

AudioComponentDescription audio_unit_description;
audio_unit_description.componentType = kAudioUnitType_Output;

#if !(TARGET_OS_TV)
audio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
audio_unit_description.componentSubType = kAudioUnitSubType_RemoteIO;
#else
audio_unit_description.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
Expand All @@ -1062,46 +1087,35 @@ - (BOOL)setupAudioUnit:(AudioUnit *)voice_unit playout:(BOOL)isPlayout;

if (!isPlayout)
{
UInt32 enable_input = 1;
AudioUnitSetProperty(*voice_unit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, kInputBus, &enable_input,
sizeof(enable_input));
AudioUnitSetProperty(*voice_unit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output, kInputBus,
&stream_format, sizeof (stream_format));

AURenderCallbackStruct input_callback;
input_callback.inputProc = recording_cb;
input_callback.inputProcRefCon = (__bridge void *)(self);

AudioUnitSetProperty(*voice_unit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global, kInputBus, &input_callback,
sizeof(input_callback));
UInt32 flag = 0;
AudioUnitSetProperty(*voice_unit, kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output, kInputBus, &flag,
sizeof(flag));
// Disable Output on record
UInt32 enable_output = 0;
AudioUnitSetProperty(*voice_unit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output, kOutputBus, &enable_output,
sizeof(enable_output));
CheckError(AudioUnitSetProperty(*voice_unit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, kOutputBus, &input_callback,
sizeof(input_callback)),@"error 3");

CheckError(AudioUnitSetProperty(*voice_unit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input, kOutputBus,
&stream_format, sizeof (stream_format)),@"playout AudioUnitSetProperty error");



} else
{
UInt32 enable_output = 1;
AudioUnitSetProperty(*voice_unit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output, kOutputBus, &enable_output,
sizeof(enable_output));
AudioUnitSetProperty(*voice_unit, kAudioUnitProperty_StreamFormat,


CheckError(AudioUnitSetProperty(*voice_unit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input, kOutputBus,
&stream_format, sizeof (stream_format));
// Disable Input on playout
UInt32 enable_input = 0;
AudioUnitSetProperty(*voice_unit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, kInputBus, &enable_input,
sizeof(enable_input));
[self setPlayOutRenderCallback:*voice_unit];
&stream_format, sizeof (stream_format)),@"error b");
AURenderCallbackStruct render_callback;
render_callback.inputProc = playout_cb;;
render_callback.inputProcRefCon = (__bridge void *)(self);
CheckError(AudioUnitSetProperty(*voice_unit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, kOutputBus, &render_callback,
sizeof(render_callback)),@"error last");
}

Float64 f64 = 0;
Expand Down
Loading