ios – AudioConverter#FillComplexBuffer返回-50并且不转换任何内容

我严格按照这个 Xamarin 示例(它基于这个 Apple 示例)来将 LinearPCM 文件转换为 AAC 文件.

该示例本身运行得很好,但在我的项目中实现后,FillComplexBuffer 方法返回错误 -50,并且 InputData 事件一次都没有被触发,因此没有进行任何转换.

只有在设备上进行测试时才会出现该错误.在模拟器上进行测试时,一切都很顺利,最后我得到了一个很好的编码AAC文件.

我今天尝试了很多东西,我发现我的代码和示例代码之间没有任何区别.你知道这可能来自哪里吗?

我不知道这是否与Xamarin有关,因为Xamarin样本效果很好,所以看起来并不是这样.

这是我的代码的相关部分:

/// <summary>
/// Encodes the LinearPCM WAV file at TempWavFilePath into an AAC (.m4a) file at
/// <paramref name="path"/> using an AudioConverter, then deletes the temporary WAV file.
/// </summary>
/// <param name="path">Destination path for the encoded M4A/AAC file.</param>
protected void Encode(string path)
{
  // In class setup. File at TempWavFilePath has DecodedFormat as format.
  //
  // DecodedFormat = AudioStreamBasicDescription.CreateLinearPCM();
  // AudioStreamBasicDescription encodedFormat = new AudioStreamBasicDescription()
  // {
  //   Format = AudioFormatType.MPEG4AAC,
  //   SampleRate = DecodedFormat.SampleRate,
  //   ChannelsPerFrame = DecodedFormat.ChannelsPerFrame,
  // };
  // AudioStreamBasicDescription.GetFormatInfo (ref encodedFormat);
  // EncodedFormat = encodedFormat;


  // Setup converter
  AudioStreamBasicDescription inputFormat = DecodedFormat;
  AudioStreamBasicDescription outputFormat = EncodedFormat;

  AudioConverterError converterCreateError;
  AudioConverter converter = AudioConverter.Create(inputFormat, outputFormat, out converterCreateError);
  if (converterCreateError != AudioConverterError.None)
  {
    Console.WriteLine("Converter creation error: " + converterCreateError);
  }
  converter.EncodeBitRate = 192000; // AAC 192kbps

  // get the actual formats back from the Audio Converter
  inputFormat = converter.CurrentInputStreamDescription;
  outputFormat = converter.CurrentOutputStreamDescription;


  /*** INPUT ***/

  AudioFile inputFile = AudioFile.OpenRead(NSUrl.FromFilename(TempWavFilePath));

  // init buffer
  const int inputBufferBytesSize = 32768;
  IntPtr inputBufferPtr = Marshal.AllocHGlobal(inputBufferBytesSize);

  // calc number of packets per read (PCM is CBR, so BytesPerPacket is non-zero)
  int inputSizePerPacket = inputFormat.BytesPerPacket;
  int inputBufferPacketSize = inputBufferBytesSize / inputSizePerPacket;
  AudioStreamPacketDescription[] inputPacketDescriptions = null;

  // init position
  long inputFilePosition = 0;

  // define input delegate: called by FillComplexBuffer whenever the converter
  // needs more source packets
  converter.InputData += delegate(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription)
  {
    // clamp the request to what fits in our input buffer
    if (numberDataPackets > inputBufferPacketSize)
    {
      numberDataPackets = inputBufferPacketSize;
    }

    // read from the file
    int outNumBytes;
    AudioFileError readError = inputFile.ReadPackets(false, out outNumBytes, inputPacketDescriptions, inputFilePosition, ref numberDataPackets, inputBufferPtr);
    if (readError != 0)
    {
      Console.WriteLine("Read error: " + readError);
    }

    // advance input file packet position
    inputFilePosition += numberDataPackets;

    // put the data pointer into the buffer list
    data.SetData(0, inputBufferPtr, outNumBytes);

    // add packet descriptions if required (null for CBR PCM input)
    if (dataPacketDescription != null)
    {
      dataPacketDescription = inputPacketDescriptions;
    }

    return AudioConverterError.None;
  };


  /*** OUTPUT ***/

  // create the destination file
  var outputFile = AudioFile.Create(NSUrl.FromFilename(path), AudioFileType.M4A, AudioFileFlags.EraseFlags);

  // init buffer
  const int outputBufferBytesSize = 32768;
  IntPtr outputBufferPtr = Marshal.AllocHGlobal(outputBufferBytesSize);

  AudioBuffers buffers = new AudioBuffers(1);

  // calc number of packets per write
  int outputSizePerPacket = outputFormat.BytesPerPacket;
  AudioStreamPacketDescription[] outputPacketDescriptions = null;

  if (outputSizePerPacket == 0)
  {
    // if the destination format is VBR, we need to get max size per packet from the converter
    outputSizePerPacket = (int)converter.MaximumOutputPacketSize;

    // allocate memory for the PacketDescription structures describing the layout of each packet
    outputPacketDescriptions = new AudioStreamPacketDescription[outputBufferBytesSize / outputSizePerPacket];
  }
  int outputBufferPacketSize = outputBufferBytesSize / outputSizePerPacket;

  // init position
  long outputFilePosition = 0;

  long totalOutputFrames = 0; // used for debugging

  // write magic cookie if necessary
  if (converter.CompressionMagicCookie != null && converter.CompressionMagicCookie.Length != 0)
  {
    outputFile.MagicCookie = converter.CompressionMagicCookie;
  }

  // loop to convert data
  Console.WriteLine("Converting...");
  while (true)
  {
    // (re)point the buffer list at our scratch buffer; the converter may shrink
    // DataByteSize on each pass, so reset it every iteration
    buffers[0] = new AudioBuffer()
    {
      NumberChannels = outputFormat.ChannelsPerFrame,
      DataByteSize = outputBufferBytesSize,
      Data = outputBufferPtr
    };

    int writtenPackets = outputBufferPacketSize;

    // LET'S CONVERT (it's about time...)
    AudioConverterError converterFillError = converter.FillComplexBuffer(ref writtenPackets, buffers, outputPacketDescriptions);
    if (converterFillError != AudioConverterError.None)
    {
      Console.WriteLine("FillComplexBuffer error: " + converterFillError);
    }

    if (writtenPackets == 0) // EOF
    {
      break;
    }

    // write to output file
    int inNumBytes = buffers[0].DataByteSize;

    AudioFileError writeError = outputFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePosition, ref writtenPackets, outputBufferPtr);
    if (writeError != 0)
    {
      Console.WriteLine("WritePackets error: {0}", writeError);
    }

    // advance output file packet position
    outputFilePosition += writtenPackets;

    // NOTE(review): FlowFormat looks like it should be the converter's output
    // format here (the Apple sample uses the destination format) — confirm.
    if (FlowFormat.FramesPerPacket != 0)
    {
      // the format has constant frames per packet
      totalOutputFrames += (writtenPackets * FlowFormat.FramesPerPacket);
    }
    else
    {
      // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet)
      for (var i = 0; i < writtenPackets; ++i)
      {
        totalOutputFrames += outputPacketDescriptions[i].VariableFramesInPacket;
      }
    }
  }

  // write out any of the leading and trailing frames for compressed formats only
  if (outputFormat.BitsPerChannel == 0)
  {
    Console.WriteLine("Total number of output frames counted: {0}", totalOutputFrames);
    WritePacketTableInfo(converter, outputFile);
  }

  // write the cookie again - sometimes codecs will update cookies at the end of a conversion
  if (converter.CompressionMagicCookie != null && converter.CompressionMagicCookie.Length != 0)
  {
    outputFile.MagicCookie = converter.CompressionMagicCookie;
  }

  // Clean everything.
  // Fixed: was `converter.dispose()` / `outputFile.dispose()` (lowercase), which
  // does not compile in C#; also the input file was never disposed (leak).
  // NOTE(review): on an exception the HGlobal buffers still leak — consider
  // try/finally if this path can throw in practice.
  Marshal.FreeHGlobal(inputBufferPtr);
  Marshal.FreeHGlobal(outputBufferPtr);
  buffers.Dispose();
  inputFile.Dispose();
  converter.Dispose();
  outputFile.Dispose();

  // Remove temp file
  File.Delete(TempWavFilePath);
}

我已经看过这个 SO 问题了,但其中那个不够详细的 C/Obj-C 相关答案似乎并不适用于我的情况.

谢谢 !

解决方法

我终于找到了解决方案!

我只需要在转换文件之前声明AVAudioSession类别.

AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.AudioProcessing);
AVAudioSession.SharedInstance().SetActive(true);

由于我还使用了一个AudioQueue到RenderOffline,我实际上必须将类别设置为AVAudioSessionCategory.PlayAndRecord,因此离线渲染和音频转换都可以工作.

相关文章

当我们远离最新的 iOS 16 更新版本时,我们听到了困扰 Apple...
欧版/美版 特别说一下,美版选错了 可能会永久丧失4G,不过只...
一般在接外包的时候, 通常第三方需要安装你的app进行测...
前言为了让更多的人永远记住12月13日,各大厂都在这一天将应...