No WebSocket messages sent or received for about 30 seconds after the device experiences minor packet loss

Problem description

I have been using Node.js to proxy bidirectional uncompressed audio over WebSocket connections between two devices. Each audio chunk is 100 ms long at a bitrate of about 136 kbps, so each chunk is roughly 1.7 KB. It works well even over high-latency links, VPNs, and weak 3G connections. However, when I move one of the devices about 15 m from the WiFi router, just behind a wall, audio packets stop being sent and received entirely, even though the WiFi RSSI is still high. After moving back into line of sight of the router, no packets are sent or received until roughly 30 seconds have passed, at which point they resume normally. I expected a few delayed chunks due to packet loss and retransmission, but I cannot explain why it takes so long for WebSocket messages to start flowing again. It could be related to head-of-line blocking, but a TCP connection test run behind the wall shows:
Download: 327 Mbps, Upload: 239 Mbps, Jitter: 0.39 ms, Packet Loss: 0%.
Those numbers seem to contradict my observations and do not explain why the connection dies for about 30 seconds. I have tested with Safari on iOS 14, Chrome on Android, Chrome on macOS, and Firefox on macOS, and across different WiFi routers; it happens on all of them. I have also tested both on the local network and through Cloudflare. Here is the relevant code:


connect();

function connect() {
    if (!recording) startRec(); // capture the microphone samples and send them
    console.log('listening...');
    audioCtxRX = new (window.AudioContext || window.webkitAudioContext)(); // create the audio context on user interaction

    dualTone(10, 20, 400); // play a tone

    var chunkPlaying = false;
    ws = new WebSocket('wss://www.example.com/wss3/');
    initWS = true;
    ws.binaryType = 'blob';
    ws.addEventListener('open', function (event) {
        console.log('Connection Opened');
    });
    ws.addEventListener('message', function (event) {
        dt = true;
        console.log('####message####');
        console.log(event.data);
        var data = event.data;
        dataUsed += data.size / 1000000;
        dataSum = data.size;
        if (pb) {
            data.arrayBuffer().then((buffer) => {
                console.log(buffer);
                var inpDat;
                if (depth == 1) { // 1 byte per sample when depth == 1, otherwise 2 bytes per sample
                    inpDat = new Uint8Array(buffer);
                } else {
                    inpDat = new Uint16Array(buffer);
                }
                var audioFloat32Array = new Float32Array(inpDat.length);
                for (let i = 0; i < inpDat.length; i++) {
                    audioFloat32Array[i] = inpDat[i] / (res / 2) - 1; // map the unsigned sample from [0, res] back to [-1, 1]
                }
                console.log(audioFloat32Array);
                var channels = 1; // mono RX buffer
                var frameCount = audioFloat32Array.length * compressionRatio;
                myArrayBuffer = audioCtxRX.createBuffer(channels, frameCount, 48000); // RX buffer
                var NowBuffering = myArrayBuffer.getChannelData(0);
                let m = 0;
                let c = 0;
                let y2 = 0;
                let y1 = 0;
                for (var i = 0; i < frameCount; i++) {
                    let bv = 0;

                    // linear interpolation
                    // if (i%compressionRatio == 0){
                    //     bv = audioFloat32Array[Math.floor(i/compressionRatio)];
                    //     y1 = audioFloat32Array[Math.floor(i/compressionRatio)];
                    //     y2 = i/compressionRatio+1 > audioFloat32Array.length - 1 ? y1 : audioFloat32Array[Math.floor(i/compressionRatio)+1];
                    //     m = (y2 - y1) / compressionRatio;
                    //     c = y1 - m*(i%compressionRatio);
                    // } else {
                    //     bv = m*(i%compressionRatio) + c;
                    // }
                    // NowBuffering[i] = bv;

                    // with no linear interpolation:
                    NowBuffering[i] = audioFloat32Array[Math.floor(i / compressionRatio)];
                }
                chunkID++;

                if (!chunkPlaying) {
                    if (missedC) {
                        missedC = false;
                        chunkPlaying = true;
                        setTimeout(() => {
                            chunkPlay();
                        }, chunkSec / 0.002); // wait half the chunk duration, so the next chunk arrives half-way through playback of the current one
                    } else {
                        chunkPlay();
                    }
                }

                function chunkPlay() {
                    chunkPlaying = true;
                    lastChunkID = chunkID;
                    playSt = new Date().getTime();
                    lastRC = new Date().getTime();
                    var source = audioCtxRX.createBufferSource();
                    source.buffer = myArrayBuffer;
                    source.connect(audioCtxRX.destination);
                    source.start();
                    source.onended = function () {
                        chunkPlaying = false;
                        if (chunkID != lastChunkID) {
                            // there is a new chunk, so play it right away
                            chunkPlay();
                            missedC = false;
                        } else {
                            // new chunk has not been sent
                            missedC = true;
                        }
                    }
                }

            });
        }
    });
}
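
One incidental note on the receive path above: every incoming Blob is immediately converted with arrayBuffer(), so setting binaryType to 'arraybuffer' would deliver the bytes directly and remove one asynchronous hop. A minimal sketch of the same decode step (unrelated to the dropout itself):

ws.binaryType = 'arraybuffer'; // instead of 'blob'
ws.addEventListener('message', function (event) {
    // event.data is already an ArrayBuffer; no data.arrayBuffer() step needed
    var inpDat = (depth == 1) ? new Uint8Array(event.data) : new Uint16Array(event.data);
    // ...then convert to Float32 and schedule playback exactly as in connect() above
});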


function startRec() { // capture the microphone samples and send them over the WebSocket connection
  var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
  var rate = audioCtx.sampleRate;
  var resampfact = 0;
  var resampType = 0;
  if (rate < 40000 || rate > 60000) console.error('Unsupported input sample rate. Must be 40-60 kilosamples/s');
  if (rate < 48000) {
      resampType = 1;
      // upsample by inserting one duplicate sample every resampfact samples to reach 48 kHz
      resampfact = Math.floor(rate / (48000 - rate));
      console.log('RESAMP FACTOR: ' + resampfact);
  } else if (rate > 48000) {
      resampType = 2; // downsampling not implemented yet
  }
  var stream;
  var recordedData = [];
  var startDate;

  var audioInput;
  var audioNode;
  var bufferSize = 4096;

  // reset any previous data
  recordedData = [];
  recordingLength = 0;
  recording = true;

  if (audioCtx.createJavaScriptNode) {
      audioNode = audioCtx.createJavaScriptNode(bufferSize, 1, 1);
  } else if (audioCtx.createScriptProcessor) {
      audioNode = audioCtx.createScriptProcessor(bufferSize, 1, 1);
  } else {
      throw 'WebAudio not supported!';
  }

  audioNode.connect(audioCtx.destination);
  navigator.mediaDevices.getUserMedia({ audio: true })
      .then(onMicrophoneCaptured)
      .catch(onMicrophoneError);
  function onMicrophoneCaptured(microphone) {
      stream = microphone;
      audioInput = audioCtx.createMediaStreamSource(microphone);
      audioInput.connect(audioNode);
      audioNode.onaudioprocess = onAudioProcess;
      startDate = new Date();
      sampleRate = audioCtx.sampleRate;
  }

  function stopRecording(callback) {
      // stop recording
      recording = false;
      // to make sure onaudioprocess stops firing
      stream.getTracks().forEach((track) => { track.stop(); });
      audioInput.disconnect();
      audioNode.disconnect();
      if (inCall) {
          init();
      }
  }

  function onMicrophoneError(e) {
      console.log(e);
      alert('Unable to access the microphone.');
  }

  var nSince = 0;
  function onAudioProcess(e) {
      if (!recording) {
          return;
      }
      let elm = Array.from(e.inputBuffer.getChannelData(0));
      recordedData.push.apply(recordedData,elm);
      duration = new Date().getTime() - startDate;
      let chunkLD = false;
      if (recordedData.length > Math.round(chunkSec * rate)) {
          if (resampType == 1) {
              for (let i = 0; i < recordedData.length; i++) {
                  if (i % resampfact == 0) {
                      recordedData.splice(i + 1, 0, recordedData[i]); // insert a duplicate of the current sample (upsampling)
                  }
              }
          }
          startDate = new Date().getTime();
          if (depth == 1) { // 8-bit depth or 16-bit depth
              chunk = new Uint8Array(Math.floor(recordedData.length / compressionRatio));
          } else {
              chunk = new Uint16Array(Math.floor(recordedData.length / compressionRatio));
          }
          for (let i = 0; i < recordedData.length / compressionRatio; i++) {
              let sampAvg = 0;
              for (let p = 0; p < compressionRatio; p++) {
                  sampAvg += recordedData[i * compressionRatio + p];
              }
              sampAvg /= compressionRatio;

              let cv = (sampAvg + 1) * (res / 2); // quantize [-1, 1] to [0, res]
              if (cv > res) cv = res;
              else if (cv < 0) cv = 0;
              chunk[i] = cv;
          }
          recordedData = [];
          ws.send(chunk.buffer);
      }
  }
}

I also have this code in a setInterval(), which reconnects when the WebSocket connection is closed; but when the problem occurs, the WebSocket connection stays open, so connectToWSServer() is never called:

if ((ws.readyState == 2 || ws.readyState == 3) && initWS) { // 2 = CLOSING, 3 = CLOSED
    if (pb) connectToWSServer(); // attempt to reconnect if disconnected
}
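
Since readyState stays OPEN while no data is flowing, a detection-side workaround would be an application-level watchdog that forces a reconnect after a receive stall. A minimal sketch, reusing connectToWSServer() from above (the 5000 ms stall threshold and 1000 ms check interval are illustrative values, not from my code):

// Watchdog sketch: force a reconnect when no message has arrived for a while,
// even though ws.readyState still reports OPEN (1).
var lastMessageAt = Date.now();
ws.addEventListener('message', function () { lastMessageAt = Date.now(); });

setInterval(function () {
    if (Date.now() - lastMessageAt > 5000) { // illustrative stall threshold
        ws.close(); // give up on the stalled connection
        if (pb) connectToWSServer(); // same reconnect path as above
    }
}, 1000);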

The Node.js server simply relays the WebSocket binary data between the clients.
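
A minimal sketch of such a relay, assuming the ws npm package (this is not the exact server code, which is not shown here):

// Relay sketch using the "ws" npm package: forward each binary chunk
// from one client to every other connected client.
const { WebSocketServer, WebSocket } = require('ws');
const wss = new WebSocketServer({ port: 8080 }); // illustrative port

wss.on('connection', (socket) => {
    socket.on('message', (data, isBinary) => {
        for (const client of wss.clients) {
            if (client !== socket && client.readyState === WebSocket.OPEN) {
                client.send(data, { binary: isBinary });
            }
        }
    });
});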

What could be causing this? If it is not fixable, would HTTP streaming be more effective? WebRTC does not meet my requirements. Thanks.

Edit: I have done some more testing. The problem still occurs when the WebSocket messages are much smaller (e.g. 10 bytes) and sent at a much lower rate (e.g. once per second). Starting the connection next to the router and then walking behind the wall causes the audio to stop working, but when the connection is started behind the wall, the audio works. Even stranger: when the connection is started behind the wall it works, but then cuts out when moving closer to the router! I am confused. Surely a new WebSocket should not have to be created every time the audio cuts out?
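
A stripped-down version of that small-message test looks like this (the 10-byte payload and one-second interval match the test above; the 2000 ms gap threshold is illustrative, and a message only arrives if another peer is sending through the relay):

// Probe sketch: send a tiny payload once per second and log any receive gap.
var probe = new WebSocket('wss://www.example.com/wss3/');
var lastRx = Date.now();

probe.addEventListener('open', function () {
    setInterval(function () { probe.send(new Uint8Array(10)); }, 1000);
});
probe.addEventListener('message', function () {
    var gap = Date.now() - lastRx;
    if (gap > 2000) console.warn('RX gap of ' + gap + ' ms (readyState ' + probe.readyState + ')');
    lastRx = Date.now();
});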

Edit 2: I think I have found the cause. It has nothing to do with my code, the server, or WebSockets. All the WiFi access points I had tried were the same brand. I tried connecting to a Ubiquiti AP and the problem was solved; I have no idea why the other routers behaved the way they did. It works well now. I can walk much further without losing audio, and when the audio does drop, it starts working again immediately once I move closer to the router. Having watched more closely when it happens, my guess is that when a client's RSSI gets too low or too many of its packets are dropped, the router sends a frame asking the device to reconnect to the network, which then takes some time, and that this behaviour is specific to the problematic routers' firmware.
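
For anyone who wants to confirm a reassociation like this from the browser side, one rough check is to timestamp the page's online/offline events and correlate them with the audio dropouts (with the caveat that a brief WiFi reassociation is not guaranteed to fire these events on every platform):

// Sketch: timestamp network-level events to correlate with audio dropouts.
// Caveat: short reassociations may not trigger these events on all platforms.
window.addEventListener('offline', function () {
    console.log('offline at ' + new Date().toISOString());
});
window.addEventListener('online', function () {
    console.log('online at ' + new Date().toISOString());
});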
