Problem description
I've run into a puzzling problem while using AudioToolbox. I have the following code:
#include <AudioToolbox/AudioToolbox.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
size_t load_from_file(char *data,size_t len) {
FILE *fp = fopen("test.wav","r");
if (fp == NULL) {
return 0;
}
int fd = fileno(fp);
struct stat file_info;
fstat(fd,&file_info);
off_t file_size = file_info.st_size;
if (file_size < 0 || len < (uintmax_t)file_size) {
fclose(fp);
return 0;
}
size_t result = fread(data,1,(size_t)file_size,fp);
fclose(fp);
return result;
}
struct callback_data {
char *data;
size_t len;
};
OSStatus read_data(void *inClientData,SInt64 inPosition,UInt32 requestCount,void *buffer,UInt32 *actualCount) {
struct callback_data actualData = *(struct callback_data *)inClientData;
if (actualData.len < (uintmax_t)inPosition) {
*actualCount = 0;
// Apple's documentation is very unclear on what, if any, errors should be returned when.
return noErr;
}
UInt32 possible = 0;
if ((uintmax_t)(inPosition + requestCount) > actualData.len) {
possible = actualData.len - inPosition;
} else {
possible = requestCount;
}
memcpy(buffer,actualData.data + inPosition,possible);
*actualCount = possible;
return noErr;
}
SInt64 get_size(void *inClientData) {
return (*(struct callback_data *)inClientData).len;
}
double populateAudioUnit(AudioUnit audioUnit,AudioStreamBasicDescription fileFormat,AudioFileID audioFile) {
UInt64 nPackets;
UInt32 propsize = sizeof(nPackets);
AudioFileGetProperty(audioFile,kAudioFilePropertyAudioDataPacketCount,&propsize,&nPackets);
Float64 fileDuration = (nPackets * fileFormat.mFramesPerPacket) / fileFormat.mSampleRate;
ScheduledAudioFileRegion rgn;
memset(&rgn.mTimeStamp,0,sizeof(rgn.mTimeStamp));
rgn.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
rgn.mTimeStamp.mSampleTime = 0;
rgn.mCompletionProc = NULL;
rgn.mCompletionProcUserData = NULL;
rgn.mAudioFile = audioFile;
rgn.mLoopCount = 1;
rgn.mStartFrame = 0;
rgn.mFramesToPlay = (UInt32)(nPackets * fileFormat.mFramesPerPacket);
// tell the file player AU to play all of the file
AudioUnitSetProperty(audioUnit,kAudioUnitProperty_ScheduledFileRegion,kAudioUnitScope_Global,0,&rgn,sizeof(rgn));
// prime the fp AU with default values
UInt32 defaultVal = 0;
AudioUnitSetProperty(audioUnit,kAudioUnitProperty_ScheduledFilePrime,kAudioUnitScope_Global,0,&defaultVal,sizeof(defaultVal));
// tell the fp AU when to start playing (this ts is in the AU's render time stamps; -1 means next render cycle)
AudioTimeStamp startTime;
memset(&startTime,0,sizeof(startTime));
startTime.mFlags = kAudioTimeStampSampleTimeValid;
startTime.mSampleTime = -1;
AudioUnitSetProperty(audioUnit,kAudioUnitProperty_ScheduleStartTimeStamp,kAudioUnitScope_Global,0,&startTime,sizeof(startTime));
return fileDuration;
}
void generateAUGraph(AUGraph *graph,AudioStreamBasicDescription *fileFormat,AudioFileID audioFile,AudioUnit *audioUnit) {
NewAUGraph(graph);
AudioComponentDescription componentDescription;
componentDescription.componentType = kAudioUnitType_Output;
componentDescription.componentSubType = kAudioUnitSubType_DefaultOutput;
componentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
componentDescription.componentFlags = 0;
componentDescription.componentFlagsMask = 0;
AUNode outputNode;
AUGraphAddNode(*graph,&componentDescription,&outputNode);
componentDescription.componentType = kAudioUnitType_Generator;
componentDescription.componentSubType = kAudioUnitSubType_AudioFilePlayer;
AUNode fileNode;
AUGraphAddNode(*graph,&componentDescription,&fileNode);
AUGraphOpen(*graph);
AUGraphNodeInfo(*graph,fileNode,NULL,audioUnit);
AudioUnitSetProperty(*audioUnit,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,fileFormat,sizeof(*fileFormat));
AudioUnitSetProperty(*audioUnit,kAudioUnitProperty_ScheduledFileIDs,kAudioUnitScope_Global,0,&audioFile,sizeof(AudioFileID));
AUGraphConnectNodeInput(*graph,fileNode,0,outputNode,0);
AUGraphInitialize(*graph);
usleep(10000);
if (fileFormat->mChannelsPerFrame > 2) {
UInt32 layoutSize = 0;
OSStatus err = AudioFileGetPropertyInfo(audioFile,kAudioFilePropertyChannelLayout,&layoutSize,0);
if (err || layoutSize == 0) {
return;
}
char *layout = malloc(layoutSize);
err = AudioFileGetProperty(audioFile,kAudioFilePropertyChannelLayout,&layoutSize,layout);
if (err) {
free(layout);
return;
}
AUGraphNodeInfo(*graph,fileNode,NULL,audioUnit);
AudioUnitSetProperty(*audioUnit,kAudioUnitProperty_AudioChannelLayout,kAudioUnitScope_Input,0,layout,layoutSize);
free(layout);
}
}
void *start_platform_audio(void *param) {
char *file_data = malloc(64000000);
size_t data_size = load_from_file(file_data,64000000);
if (data_size == 0) {
fprintf(stderr,"Failed to load file\n");
return NULL;
}
AudioFileID audioFile;
struct callback_data in_data = { file_data,data_size };
OSStatus err = AudioFileOpenWithCallbacks(&in_data,read_data,NULL,get_size,NULL,kAudioFileWAVEType,&audioFile);
if (err) {
// Todo: Add better error handling
printf("Some error happened with loading the data into an AudioFileID: %d\n",err);
exit(1);
}
AudioStreamBasicDescription fileFormat;
UInt32 fileFormat_size = sizeof(fileFormat);
AudioFileGetProperty(audioFile,kAudioFilePropertyDataFormat,&fileFormat_size,&fileFormat);
AUGraph graph;
AudioUnit audioUnit;
generateAUGraph(&graph,&fileFormat,audioFile,&audioUnit);
double duration = populateAudioUnit(audioUnit,fileFormat,audioFile);
(void)duration;
Boolean graphIsRunning = false;
while (true) {
AUGraphStart(graph);
sleep(5);
AUGraphStop(graph);
sleep(5);
}
AUGraphStop(graph);
AUGraphUninitialize(graph);
AUGraphClose(graph);
return NULL;
}
int main(void) {
start_platform_audio(NULL);
}
(Compiled with: clang file.c -framework AudioToolbox)
AUGraphStop(graph) takes effect (almost) immediately, but AUGraphStart(graph), when called after the graph has been stopped, only resumes playback after a delay that is affected by how long the usleep after the last AUGraphStart(graph) was.
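One workaround I have considered (untested, and I am not sure it addresses the root cause) is to reset the file-player unit and re-schedule the region and start timestamp before every restart, reusing populateAudioUnit() from above. A minimal sketch, assuming that resetting and re-scheduling at that point is legal:
// Hypothetical restart helper (sketch only, not part of the program above):
// clear any pending schedule on the file player AU, re-schedule the region
// and the -1 start timestamp via populateAudioUnit(), then start the graph.
static void restart_playback(AUGraph graph, AudioUnit fileAU,
                             AudioStreamBasicDescription fileFormat,
                             AudioFileID audioFile) {
    AudioUnitReset(fileAU, kAudioUnitScope_Global, 0);
    populateAudioUnit(fileAU, fileFormat, audioFile);
    AUGraphStart(graph);
}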
I'd appreciate any insight into what is going on here.
Edit 1: I adapted the following (Objective-C) code from Apple's iPhoneMixerEQGraphTest sample project. It does not have this problem. I'm trying to work out the main difference between my code and this code.
#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>
#import <CoreFoundation/CoreFoundation.h>
#import <Foundation/Foundation.h>
#include <unistd.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat"
#define MAXBUFS 2
#define NUMFILES 2
typedef struct {
AudioStreamBasicDescription asbd;
SInt16 *data;
UInt32 numFrames;
} SoundBuffer,*SoundBufferPtr;
typedef struct {
UInt32 frameNum;
UInt32 maxnumFrames;
SoundBuffer soundBuffer[MAXBUFS];
} SourceAudioBufferData,*SourceAudioBufferDataPtr;
CFURLRef sourceURL[2];
AUGraph mGraph;
// AudioUnit mEQ;
AudioUnit mMixer;
AudioStreamBasicDescription mClientFormat;
AudioStreamBasicDescription mOutputFormat;
// CFArrayRef mEQPresetsArray;
SourceAudioBufferData mUserData;
Boolean mIsPlaying;
void SetCanonical(AudioStreamBasicDescription *format,UInt32 nChannels,bool interleaved)
// note: leaves sample rate untouched
{
format->mFormatID = kAudioFormatLinearPCM;
int sampleSize = (UInt32)sizeof(SInt16);
format->mFormatFlags = kAudioFormatFlagIsSignedInteger |
kAudioFormatFlagsNativeEndian |
kAudioFormatFlagIsPacked;
format->mBitsPerChannel = 8 * sampleSize;
format->mChannelsPerFrame = nChannels;
format->mFramesPerPacket = 1;
if (interleaved)
format->mBytesPerPacket = format->mBytesPerFrame = nChannels * sampleSize;
else {
format->mBytesPerPacket = format->mBytesPerFrame = sampleSize;
format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
}
}
void SetAUCanonical(AudioStreamBasicDescription *format,UInt32 nChannels,bool interleaved) {
format->mFormatID = kAudioFormatLinearPCM;
format->mFormatFlags = kAudioFormatFlagIsSignedInteger |
kAudioFormatFlagsNativeEndian |
kAudioFormatFlagIsPacked;
format->mChannelsPerFrame = nChannels;
format->mFramesPerPacket = 1;
format->mBitsPerChannel = 8 * (UInt32)sizeof(SInt16);
if (interleaved)
format->mBytesPerPacket = format->mBytesPerFrame =
nChannels * (UInt32)sizeof(SInt16);
else {
format->mBytesPerPacket = format->mBytesPerFrame = (UInt32)sizeof(SInt16);
format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
}
}
const Float64 kGraphSampleRate = 44100.0;
#pragma mark - Render
// render some silence
static void SilenceData(AudioBufferList *inData) {
for (UInt32 i = 0; i < inData->mNumberBuffers; i++)
memset(inData->mBuffers[i].mData,0,inData->mBuffers[i].mDataByteSize);
}
// audio render procedure to render our client data format
// 2 ch 'lpcm' 16-bit little-endian signed integer, interleaved; this is
// mClientFormat data, see SetCanonical() above
static OSStatus renderInput(void *inRefCon,AudioUnitRenderActionFlags *ioActionFlags,const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber,UInt32 inNumberFrames,AudioBufferList *ioData) {
SourceAudioBufferDataPtr userData = (SourceAudioBufferDataPtr)inRefCon;
SInt16 *in = userData->soundBuffer[inBusNumber].data;
SInt16 *out = (SInt16 *)ioData->mBuffers[0].mData;
UInt32 sample = userData->frameNum *
userData->soundBuffer[inBusNumber].asbd.mChannelsPerFrame;
// make sure we don't attempt to render more data than we have available in
// the source buffers if one buffer is larger than the other,just render
// silence for that bus until we loop around again
if ((userData->frameNum + inNumberFrames) >
userData->soundBuffer[inBusNumber].numFrames) {
UInt32 offset = (userData->frameNum + inNumberFrames) -
userData->soundBuffer[inBusNumber].numFrames;
if (offset < inNumberFrames) {
// copy the last bit of source
SilenceData(ioData);
memcpy(out,&in[sample],((inNumberFrames - offset) *
userData->soundBuffer[inBusNumber].asbd.mBytesPerFrame));
return noErr;
} else {
// we have no source data
SilenceData(ioData);
*ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
return noErr;
}
}
memcpy(out,&in[sample],ioData->mBuffers[0].mDataByteSize);
// printf("render input bus %ld from sample %ld\n",inBusNumber,sample);
return noErr;
}
// the render notification is used to keep track of the frame number position in
// the source audio
static OSStatus renderNotification(void *inRefCon,AudioUnitRenderActionFlags *ioActionFlags,const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber,UInt32 inNumberFrames,AudioBufferList *ioData) {
SourceAudioBufferDataPtr userData = (SourceAudioBufferDataPtr)inRefCon;
if (*ioActionFlags & kAudioUnitRenderAction_PostRender) {
// printf("post render notification frameNum %ld inNumberFrames %ld\n",// userData->frameNum,inNumberFrames);
userData->frameNum += inNumberFrames;
if (userData->frameNum >= userData->maxnumFrames) {
userData->frameNum = 0;
}
}
return noErr;
}
@interface AUGraphController : NSObject
- (void)awakeFromNib;
- (void)initializeAUGraph;
- (void)loadFiles;
- (void)enableInput:(UInt32)inputNum isOn:(AudioUnitParameterValue)isONValue;
- (void)setInputVolume:(UInt32)inputNum value:(AudioUnitParameterValue)value;
- (void)setOutputVolume:(AudioUnitParameterValue)value;
- (void)startAUGraph;
- (void)stopAUGraph;
@end
@implementation AUGraphController
- (void)dealloc {
printf("AUGraphController dealloc\n");
DisposeAUGraph(mGraph);
free(mUserData.soundBuffer[0].data);
free(mUserData.soundBuffer[1].data);
CFRelease(sourceURL[0]);
CFRelease(sourceURL[1]);
// CFRelease(mEQPresetsArray);
[super dealloc];
}
- (void)awakeFromNib {
printf("AUGraphController awakeFromNib\n");
mIsPlaying = false;
// clear the mSoundBuffer struct
memset(&mUserData.soundBuffer,0,sizeof(mUserData.soundBuffer));
// create the URLs we'll use for source A and B
NSString *sourceA = @"/Users/user/tmp/test.wav";
NSString *sourceB = @"/Users/user/tmp/test.wav";
sourceURL[0] = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,(CFStringRef)sourceA,kCFURLPOSIXPathStyle,false);
sourceURL[1] = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,(CFStringRef)sourceB,kCFURLPOSIXPathStyle,false);
}
- (void)initializeAUGraph {
printf("initializeAUGraph\n");
AUNode outputNode;
// AUNode eqNode;
AUNode mixerNode;
printf("create client ASBD\n");
// client format audio goes into the mixer
SetCanonical(&mClientFormat,2,true);
mClientFormat.mSampleRate = kGraphSampleRate;
printf("create output ASBD\n");
// output format
SetAUCanonical(&mOutputFormat,2,true);
mOutputFormat.mSampleRate = kGraphSampleRate;
OSStatus result = noErr;
// load up the audio data
printf("load up audio data\n");
[self loadFiles];
printf("\nnew AUGraph\n");
// create a new AUGraph
result = NewAUGraph(&mGraph);
if (result) {
printf("NewAUGraph result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
// create Audio Component Descriptions for the AUs we want in the graph
// output unit
AudioComponentDescription output_desc;
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = kAudioUnitSubType_HALOutput;
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
output_desc.componentFlags = 0;
output_desc.componentFlagsMask = 0;
// ipodeQ unit
// AudioComponentDescription eq_desc;
// eq_desc.componentType = kAudioUnitType_Effect;
// eq_desc.componentSubType = kAudioUnitSubType_GraphicEQ;
// eq_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// eq_desc.componentFlags = 0;
// eq_desc.componentFlagsMask = 0;
// multichannel mixer unit
AudioComponentDescription mixer_desc;
mixer_desc.componentType = kAudioUnitType_Mixer;
mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixer_desc.componentFlags = 0;
mixer_desc.componentFlagsMask = 0;
printf("add nodes\n");
// create a node in the graph that is an AudioUnit,using the supplied
// AudioComponentDescription to find and open that unit
result = AUGraphAddNode(mGraph,&output_desc,&outputNode);
if (result) {
printf("AUGraphNewNode 1 result %lu %4.4s\n",result,(char *)&result);
return;
}
// result = AUGraphAddNode(mGraph,&eq_desc,&eqNode);
// if (result) {
// printf("AUGraphNewNode 2 result %lu %4.4s\n",(char *)&result);
// return;
// }
result = AUGraphAddNode(mGraph,&mixer_desc,&mixerNode);
if (result) {
printf("AUGraphNewNode 3 result %lu %4.4s\n",(char *)&result);
return;
}
// connect a node's output to a node's input
// mixer -> eq -> output
result = AUGraphConnectNodeInput(mGraph,mixerNode,0,outputNode,0);
if (result) {
printf("AUGraphConnectNodeInput result %lu %4.4s\n",result,(char *)&result);
return;
}
// result = AUGraphConnectNodeInput(mGraph,eqNode,0);
// if (result) {
// printf("AUGraphConnectNodeInput result %lu %4.4s\n",// (char *)&result);
// return;
// }
// open the graph AudioUnits are open but not initialized (no resource
// allocation occurs here)
result = AUGraphOpen(mGraph);
if (result) {
printf("AUGraphOpen result %hd %08X %4.4s\n",(char *)&result);
return;
}
// grab the audio unit instances from the nodes
result = AUGraphNodeInfo(mGraph,mixerNode,NULL,&mMixer);
if (result) {
printf("AUGraphNodeInfo result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
// result = AUGraphNodeInfo(mGraph,&mEQ);
// if (result) {
// printf("AUGraphNodeInfo result %hd %08X %4.4s\n",// (unsigned int)result,(char *)&result);
// return;
// }
// set bus count
UInt32 numbuses = 2;
printf("set input bus count %lu\n",numbuses);
result = AudioUnitSetProperty(mMixer,kAudioUnitProperty_ElementCount,kAudioUnitScope_Input,0,&numbuses,sizeof(numbuses));
if (result) {
printf("AudioUnitSetProperty result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
for (UInt32 i = 0; i < numbuses; ++i) {
// setup render callback struct
AURenderCallbackStruct rcbs;
rcbs.inputProc = &renderInput;
rcbs.inputProcRefCon = &mUserData;
printf("set AUGraphSetNodeInputCallback\n");
// set a callback for the specified node's specified input
result = AUGraphSetNodeInputCallback(mGraph,mixerNode,i,&rcbs);
if (result) {
printf("AUGraphSetNodeInputCallback result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
printf("set input bus %d, client kAudioUnitProperty_StreamFormat\n",(unsigned int)i);
// set the input stream format, this is the format of the audio for mixer
// input
result = AudioUnitSetProperty(mMixer,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,i,&mClientFormat,sizeof(mClientFormat));
if (result) {
printf("AudioUnitSetProperty result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
}
printf("get EQ kAudioUnitProperty_FactoryPresets\n");
// get the eq's factory preset list -- this is a read-only CFArray array of
// AUPreset structures host owns the returned array and should release it when
// no longer needed
// UInt32 size = sizeof(mEQPresetsArray);
// result =
// AudioUnitGetProperty(mEQ,kAudioUnitProperty_FactoryPresets,// kAudioUnitScope_Global,&mEQPresetsArray,// &size);
// if (result) {
// printf("AudioUnitGetProperty result %hd %08X %4.4s\n",*(int16_t
// *)&result,(char *)&result);
// return;
// }
/* this code can be used if you're interested in dumping out the preset list
printf("ipodeQ Factory Preset List:\n");
UInt8 count = CFArrayGetCount(mEQPresetsArray);
for (int i = 0; i < count; ++i) {
AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray,i);
CFShow(aPreset->presetName);
}*/
printf("set output kAudioUnitProperty_StreamFormat\n");
// set the output stream format of the mixer
result = AudioUnitSetProperty(mMixer,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,&mOutputFormat,sizeof(mOutputFormat));
if (result) {
printf("AudioUnitSetProperty result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
// // set the output stream format of the ipodeQ audio unit
// result = AudioUnitSetProperty(mEQ,// kAudioUnitScope_Output,// sizeof(mOutputFormat));
// if (result) {
// printf("AudioUnitSetProperty result %hd %08X %4.4s\n",(char *)&result);
// return;
// }
printf("set render notification\n");
// add a render notification,this is a callback that the graph will call
// every time the graph renders the callback will be called once before the
// graph’s render operation,and once after the render operation is complete
result = AUGraphAddRenderNotify(mGraph,renderNotification,&mUserData);
if (result) {
printf("AUGraphAddRenderNotify result %hd %08X %4.4s\n",(char *)&result);
return;
}
printf("AUGraphInitialize\n");
// Now that we've set everything up we can initialize the graph,this will
// also validate the connections
result = AUGraphInitialize(mGraph);
if (result) {
printf("AUGraphInitialize result %hd %08X %4.4s\n",(char *)&result);
return;
}
CAShow(mGraph);
}
// load up audio data from the demo files into mSoundBuffer.data used in the
// render proc
- (void)loadFiles {
mUserData.frameNum = 0;
mUserData.maxnumFrames = 0;
for (int i = 0; i < NUMFILES && i < MAXBUFS; i++) {
printf("loadFiles,%d\n",i);
ExtAudioFileRef xafref = 0;
// open one of the two source files
OSStatus result = ExtAudioFileOpenURL(sourceURL[i],&xafref);
if (result || 0 == xafref) {
printf("ExtAudioFileOpenURL result %hd %08X %4.4s\n",(char *)&result);
return;
}
// get the file data format; this represents the file's actual data format
// for informational purposes only -- the client format set on ExtAudioFile
// is what we really want back
AudioStreamBasicDescription fileFormat;
UInt32 propSize = sizeof(fileFormat);
result = ExtAudioFileGetProperty(
xafref,kExtAudioFileProperty_FileDataFormat,&propSize,&fileFormat);
if (result) {
printf("ExtAudioFileGetProperty kExtAudioFileProperty_FileDataFormat "
"result %d %08X %4.4s\n",(char *)&result);
return;
}
printf("file %d,native file format\n",i);
// set the client format to be what we want back
// this is the same format audio we're giving to the the mixer input
result =
ExtAudioFileSetProperty(xafref,kExtAudioFileProperty_ClientDataFormat,sizeof(mClientFormat),&mClientFormat);
if (result) {
printf("ExtAudioFileSetProperty kExtAudioFileProperty_ClientDataFormat "
"%ld %08X %4.4s\n",(char *)&result);
return;
}
// get the file's length in sample frames
UInt64 numFrames = 0;
propSize = sizeof(numFrames);
result = ExtAudioFileGetProperty(
xafref,kExtAudioFileProperty_FileLengthFrames,&propSize,&numFrames);
if (result || numFrames == 0) {
printf("ExtAudioFileGetProperty kExtAudioFileProperty_FileLengthFrames "
"result %ld %08X %4.4s\n",(char *)&result);
return;
}
// keep track of the largest number of source frames
if (numFrames > mUserData.maxnumFrames)
mUserData.maxnumFrames = numFrames;
// set up our buffer
mUserData.soundBuffer[i].numFrames = numFrames;
mUserData.soundBuffer[i].asbd = mClientFormat;
UInt32 samples =
numFrames * mUserData.soundBuffer[i].asbd.mChannelsPerFrame;
mUserData.soundBuffer[i].data = (SInt16 *)calloc(samples,sizeof(SInt16));
// set up an AudioBufferList to read data into
AudioBufferList bufList;
bufList.mNumberBuffers = 1;
bufList.mBuffers[0].mNumberChannels =
mUserData.soundBuffer[i].asbd.mChannelsPerFrame;
bufList.mBuffers[0].mData = mUserData.soundBuffer[i].data;
bufList.mBuffers[0].mDataByteSize = samples * sizeof(SInt16);
// perform a synchronous sequential read of the audio data out of the file
// into our allocated data buffer
UInt32 numPackets = numFrames;
result = ExtAudioFileRead(xafref,&numPackets,&bufList);
if (result) {
printf("ExtAudioFileRead result %hd %08X %4.4s\n",(char *)&result);
free(mUserData.soundBuffer[i].data);
mUserData.soundBuffer[i].data = 0;
return;
}
// close the file and dispose the ExtAudioFileRef
ExtAudioFileDispose(xafref);
}
}
#pragma mark -
// enable or disables a specific bus
- (void)enableInput:(UInt32)inputNum isOn:(AudioUnitParameterValue)isONValue {
printf("BUS %ld isON %f\n",inputNum,isONValue);
OSStatus result =
AudioUnitSetParameter(mMixer,kMultiChannelMixerParam_Enable,kAudioUnitScope_Input,inputNum,isONValue,0);
if (result) {
printf("AudioUnitSetParameter kMultiChannelMixerParam_Enable result %ld "
"%08X %4.4s\n",result,(unsigned int)result,(char *)&result);
return;
}
}
// sets the input volume for a specific bus
- (void)setInputVolume:(UInt32)inputNum value:(AudioUnitParameterValue)value {
OSStatus result =
AudioUnitSetParameter(mMixer,kMultiChannelMixerParam_Volume,kAudioUnitScope_Input,inputNum,value,0);
if (result) {
printf("AudioUnitSetParameter kMultiChannelMixerParam_Volume Input result "
"%ld %08X %4.4s\n",result,(unsigned int)result,(char *)&result);
return;
}
}
// sets the overall mixer output volume
- (void)setOutputVolume:(AudioUnitParameterValue)value {
OSStatus result =
AudioUnitSetParameter(mMixer,kMultiChannelMixerParam_Volume,kAudioUnitScope_Output,0,value,0);
if (result) {
printf("AudioUnitSetParameter kMultiChannelMixerParam_Volume Output result "
"%ld %08X %4.4s\n",result,(unsigned int)result,(char *)&result);
return;
}
}
// - (void)selectEQPreset:(NSInteger)value;
// {
// AUPreset *aPreset =
// (AUPreset *)CFArrayGetValueAtIndex(mEQPresetsArray,value);
// Osstatus result = AudioUnitSetProperty(mEQ,// kAudioUnitProperty_PresentPreset,// kAudioUnitScope_Global,aPreset,// sizeof(AUPreset));
// if (result) {
// printf("AudioUnitSetProperty result %hd %08X %4.4s\n",*(int16_t
// *)&result,(char *)&result);
// return;
// };
// printf("SET EQ PRESET %d ",value);
// CFShow(aPreset->presetName);
// }
// starts render
- (void)startAUGraph {
printf("PLAY\n");
OSStatus result = AUGraphStart(mGraph);
if (result) {
printf("AUGraphStart result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
mIsPlaying = true;
}
// stops render
- (void)stopAUGraph {
printf("STOP\n");
Boolean isRunning = false;
OSStatus result = AUGraphIsRunning(mGraph,&isRunning);
if (result) {
printf("AUGraphIsRunning result %hd %08X %4.4s\n",*(int16_t *)&result,(unsigned int)result,(char *)&result);
return;
}
if (isRunning) {
result = AUGraphStop(mGraph);
if (result) {
printf("AUGraphStop result %hd %08X %4.4s\n",(char *)&result);
return;
}
mIsPlaying = false;
}
}
@end
#pragma clang diagnostic pop
int main(void) {
AUGraphController *controller = [[AUGraphController alloc] init];
[controller awakeFromNib];
[controller initializeAUGraph];
[controller enableInput:0 isOn:true];
[controller enableInput:1 isOn:false];
[controller setInputVolume:0 value:1.f];
[controller setInputVolume:1 value:0.f];
[controller setOutputVolume:1.f];
[controller startAUGraph];
sleep(10);
[controller stopAUGraph];
sleep(2);
[controller startAUGraph];
while (true)
continue;
}
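One concrete difference I notice: the sample guards every call, checking AUGraphIsRunning and each OSStatus before stopping or starting, whereas my C loop above calls AUGraphStart/AUGraphStop unconditionally. Below is a minimal sketch of that loop with the same guards added (my own untested adaptation, not taken from the sample):
// Sketch: the start/stop loop from start_platform_audio(), guarded the way
// the sample's startAUGraph/stopAUGraph methods are. Untested.
for (;;) {
    OSStatus err = AUGraphStart(graph);
    if (err) {
        printf("AUGraphStart failed: %d\n", (int)err);
        break;
    }
    sleep(5);
    Boolean isRunning = false;
    AUGraphIsRunning(graph, &isRunning);
    if (isRunning && (err = AUGraphStop(graph)) != noErr) {
        printf("AUGraphStop failed: %d\n", (int)err);
        break;
    }
    sleep(5);
}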