Play multiple audio files on Safari at once - iOS

I want to play multiple audio files simultaneously on iOS.
On the click of a button I create multiple instances of an Audio element and put them into an array:
let audio = new Audio('path.wav');
audio.play().then(() => {
    audio.pause();
    possibleAudiosToPlay.push(audio);
});
After a while I play them all:
possibleAudiosToPlay.forEach(el => {
    el.currentTime = 0;
    el.play();
});
This does play all the audio files, but on iOS each new one that starts stops the one before it.
Apple's developer guide says this isn't possible at all with HTML5 Audio:
Playing multiple simultaneous audio streams is also not supported.
But can this be achieved with the Web Audio API?
There isn't anything written about it in Apple's developer guide.

Yes, you can with the Web Audio API. You have to create an AudioBufferSourceNode for each of your audio sources, since each source node can be started only once (you can't stop it and play it again).
const AudioContext = window.AudioContext || window.webkitAudioContext;
const ctx = new AudioContext();
const audioPaths = [
    "path/to/audio_file1.wav",
    "path/to/audio_file2.wav",
    "path/to/audio_file3.wav"
];
let promises = [];

// Utility function to load an audio file and resolve it as a decoded audio buffer
function getBuffer(url, audioCtx) {
    return new Promise((resolve, reject) => {
        if (!url) {
            reject("Missing url!");
            return;
        }
        if (!audioCtx) {
            reject("Missing audio context!");
            return;
        }
        let xhr = new XMLHttpRequest();
        xhr.open("GET", url);
        xhr.responseType = "arraybuffer";
        xhr.onload = function() {
            let arrayBuffer = xhr.response;
            // Safari still needs the callback form of decodeAudioData
            audioCtx.decodeAudioData(arrayBuffer, decodedBuffer => {
                resolve(decodedBuffer);
            }, err => {
                reject(err);
            });
        };
        xhr.onerror = function() {
            reject("An error occurred.");
        };
        xhr.send();
    });
}

audioPaths.forEach(p => {
    promises.push(getBuffer(p, ctx));
});

// Once all your sounds are loaded, create an AudioBufferSourceNode for each one and start it
Promise.all(promises).then(buffers => {
    buffers.forEach(b => {
        let source = ctx.createBufferSource();
        source.buffer = b;
        source.connect(ctx.destination);
        source.start();
    });
});
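Two iOS-specific caveats worth adding: the AudioContext can usually only be started (or resumed) from a user gesture, and an AudioBufferSourceNode is one-shot, so replaying a sound means creating a fresh source node from the same decoded buffer. A minimal sketch, assuming a hypothetical #play button and that you kept the decoded buffers around:
// decodedBuffers is assumed to hold the buffers resolved by Promise.all above.
// The #play button is hypothetical; any user-initiated event will do on iOS.
document.querySelector("#play").addEventListener("click", () => {
    ctx.resume().then(() => {
        decodedBuffers.forEach(b => {
            // Source nodes are one-shot: create a fresh one for every playback.
            const source = ctx.createBufferSource();
            source.buffer = b;
            source.connect(ctx.destination);
            source.start();
        });
    });
});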

Related

WebRTC connection does not resume after mobile browser is backgrounded

I have a web application running on Safari on an iPad displaying a live WebRTC video stream. When the user switches away from Safari for a few seconds, and then switches back, the <video> element just shows a black rectangle.
I have added logging to the onsignalingstatechange handler, and checked the console logs for any apparent errors after resuming Safari, but there is nothing obvious indicating the failure.
How can I recover/resume/restart the stream after the user switches back to Safari?
Here is my cargo cult WebRTC code, for reference:
export default class WebRtcPlayer {
    static server = "http://127.0.0.1:8083";

    server = null;
    stream = null;
    channel = null;
    webrtc = null;
    mediastream = null;
    video = null;

    constructor(id, stream, channel) {
        this.server = WebRtcPlayer.server;
        this.video = document.getElementById(id);
        this.stream = stream;
        this.channel = channel;
        this.video.addEventListener("loadeddata", () => {
            this.video.play();
        });
        this.video.addEventListener("error", () => {
            console.error("video error");
        });
        this.play();
    }

    getStreamUrl() {
        // RTSPtoWeb only, not RTSPtoWebRTC
        return `${this.server}/stream/${this.stream}/channel/${this.channel}/webrtc`;
    }

    async play() {
        console.log("webrtc play");
        this.mediastream = new MediaStream();
        this.video.srcObject = this.mediastream;
        this.webrtc = new RTCPeerConnection({
            iceServers: [{
                urls: ["stun:stun.l.google.com:19302"],
            }],
            sdpSemantics: "unified-plan"
        });
        this.webrtc.onnegotiationneeded = this.handleNegotiationNeeded.bind(this);
        this.webrtc.onsignalingstatechange = this.handleSignalingStateChange.bind(this);
        this.webrtc.ontrack = this.handleTrack.bind(this);
        this.webrtc.addTransceiver("video", {
            "direction": "sendrecv",
        });
    }

    async handleNegotiationNeeded() {
        console.log("handleNegotiationNeeded");
        let offer = await this.webrtc.createOffer({
            offerToReceiveAudio: false,
            offerToReceiveVideo: true
        });
        await this.webrtc.setLocalDescription(offer);
    }

    async handleSignalingStateChange() {
        console.log(`handleSignalingStateChange ${this.webrtc.signalingState}`);
        switch (this.webrtc.signalingState) {
            case "have-local-offer":
                let formData = new FormData();
                formData.append("data", btoa(this.webrtc.localDescription.sdp));
                const response = await fetch(this.getStreamUrl(), {
                    method: "POST",
                    body: formData,
                });
                this.webrtc.setRemoteDescription(new RTCSessionDescription({
                    type: "answer",
                    sdp: atob(await response.text()),
                }));
                break;
            case "stable":
                /*
                 * There is no ongoing exchange of offer and answer underway.
                 * This may mean that the RTCPeerConnection object is new, in which case both the localDescription and remoteDescription are null;
                 * it may also mean that negotiation is complete and a connection has been established.
                 */
                break;
            case "closed":
                /*
                 * The RTCPeerConnection has been closed.
                 */
                break;
            default:
                console.log(`unhandled signalingState is ${this.webrtc.signalingState}`);
                break;
        }
    }

    handleTrack(event) {
        console.log("handle track");
        this.mediastream.addTrack(event.track);
    }

    static setServer(serv) {
        this.server = serv;
    }
}
I'm not sure if it's the best way, but I used the Page Visibility API to subscribe to the visibilitychange event:
constructor(id, stream, channel) {
    // ...
    document.addEventListener("visibilitychange", () => {
        if (document.visibilityState === "visible") {
            console.log("Document became visible, restarting WebRTC stream.");
            this.play();
        }
    });
    // ...
}
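One refinement that may help (my assumption, not something the original answer did): inside that same listener, close the previous RTCPeerConnection before calling play() again, so stale connections don't pile up across repeated background/foreground cycles:
document.addEventListener("visibilitychange", () => {
    if (document.visibilityState === "visible") {
        // Dispose of the old peer connection before negotiating a fresh one.
        if (this.webrtc) {
            this.webrtc.close();
            this.webrtc = null;
        }
        this.play();
    }
});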

Is it possible to do voice pitch shifting in Twilio group video?

We have built a web application. The application's core is to arrange meetings/sessions on the web. So User A (the meeting co-ordinator) will arrange a meeting/session and all other participants (B, C, D, etc.) will join the meeting/session. I have used Twilio group video call to achieve this.
I have the below use case.
We want to pitch-shift User A's (the meeting co-ordinator's) voice, so that all other participants receive the pitch-shifted voice in the group video. We have evaluated AWS Polly with Twilio, but it doesn't match our use case.
So please advise: is there any service in Twilio to achieve this scenario?
(or)
Would it be possible to intercept the Twilio group call and pass the pitch-shifted voice to the other participants?
Sample Code Used
// audioContext is used throughout but was not defined in the excerpt; create it here.
var audioContext = new (window.AudioContext || window.webkitAudioContext)();

initAudio();

function initAudio() {
    analyser1 = audioContext.createAnalyser();
    analyser1.fftSize = 1024;
    analyser2 = audioContext.createAnalyser();
    analyser2.fftSize = 1024;
    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    if (!navigator.getUserMedia)
        return(alert("Error: getUserMedia not supported!"));
    navigator.getUserMedia({ audio: true }, function(stream){
        gotStream(stream);
    }, function(){ console.log('Error getting Microphone stream'); });
    if ((typeof MediaStreamTrack === 'undefined')||(!MediaStreamTrack.getSources)){
        console.log("This browser does not support MediaStreamTrack, so doesn't support selecting sources.\n\nTry Chrome Canary.");
    } else {
        MediaStreamTrack.getSources(gotSources);
    }
}

function gotStream (stream) {
    audioInput = audioContext.createMediaStreamSource(stream);
    outputMix = audioContext.createGain();
    dryGain = audioContext.createGain();
    wetGain = audioContext.createGain();
    effectInput = audioContext.createGain();
    audioInput.connect(dryGain);
    audioInput.connect(effectInput);
    dryGain.connect(outputMix);
    wetGain.connect(outputMix);
    audioOutput = audioContext.createMediaStreamDestination();
    outputMix.connect(audioOutput);
    outputMix.connect(analyser2);
    crossfade(1.0);
    changeEffect();
}

function crossfade (value) {
    var gain1 = Math.cos(value * 0.5 * Math.PI);
    var gain2 = Math.cos((1.0 - value) * 0.5 * Math.PI);
    dryGain.gain.value = gain1;
    wetGain.gain.value = gain2;
}

function createPitchShifter () {
    effect = new Jungle( audioContext );
    effect.output.connect( wetGain );
    effect.setPitchOffset(1);
    return effect.input;
}

function changeEffect () {
    if (currentEffectNode)
        currentEffectNode.disconnect();
    if (effectInput)
        effectInput.disconnect();
    var effect = 'pitch';
    switch (effect) {
        case 'pitch':
            currentEffectNode = createPitchShifter();
            break;
    }
    audioInput.connect(currentEffectNode);
}
I am facing the error below while adding the LocalAudioTrack to a room:
var mediaStream = new Twilio.Video.LocalAudioTrack(audioOutput.stream);
room.localParticipant.publishTrack(mediaStream, {
    name: 'adminaudio'
});
ERROR:
Uncaught (in promise) TypeError: Failed to execute 'addTrack' on 'MediaStream': parameter 1 is not of type 'MediaStreamTrack'.
Twilio developer evangelist here.
There is nothing within Twilio itself that pitch shifts voices.
If you are building this in a browser, then you could use the Web Audio API to take the input from the user's microphone and pitch shift it, then provide the resultant audio stream to the Video API instead of the original mic stream.
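To make that concrete, here is a minimal sketch of that pipeline: take the microphone stream, run it through a pitch-shifting node, and publish the track from a MediaStreamAudioDestinationNode instead of the raw microphone track. pitchShiftNode is a placeholder for whatever pitch-shifting node your library of choice provides (e.g. the Jungle helper from the question), and room is assumed to be an already-connected Twilio Room:
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
navigator.mediaDevices.getUserMedia({ audio: true }).then(micStream => {
    const source = audioContext.createMediaStreamSource(micStream);
    const destination = audioContext.createMediaStreamDestination();
    // pitchShiftNode is a placeholder for your pitch-shifting AudioNode (or node graph).
    source.connect(pitchShiftNode);
    pitchShiftNode.connect(destination);
    // Publish the processed track instead of the raw microphone track.
    const processedTrack = destination.stream.getAudioTracks()[0];
    const localAudioTrack = new Twilio.Video.LocalAudioTrack(processedTrack);
    room.localParticipant.publishTrack(localAudioTrack);
});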
The comments in the above answer are SO helpful! I've been researching this for a couple of weeks, posted to Twilio-video.js to no avail, and finally just the right phrasing pulled this up on S.O.!
To summarize, and to add what I've found to work (since it's hard to follow all the 27 questions/comments/code excerpts):
when connecting to Twilio:
const room = await Video.connect(twilioToken, {
    name: roomName,
    tracks: localTracks,
    audio: false, // if you don't want to hear the normal voice at all, you can hide this and add the shifted track upon participant connections
    video: true,
    logLevel: "debug",
}).then((room) => {
    return room;
});
upon a new (remote) participant connection:
const stream = new MediaStream([audioTrack.mediaStreamTrack]);
const audioContext = new AudioContext();
const source = audioContext.createMediaStreamSource(stream);
// Destination node whose stream will carry the processed audio (not defined in the original excerpt).
const audioOutput = audioContext.createMediaStreamDestination();

console.log("using PitchShift.js");
var pitchShift = PitchShift(audioContext);
// pitchVal, freqVal and gainVal come from the app's own UI controls (not shown here).
if (isFinite(pitchVal)) {
    pitchShift.transpose = pitchVal;
    console.log("pitch is " + pitchVal);
}
pitchShift.wet.value = 1;
pitchShift.dry.value = 0.5;

try {
    audioOutput.stream.getAudioTracks()[0]?.applyConstraints({
        echoCancellation: true,
        noiseSuppression: true,
    });
} catch (e) {
    console.log("tried to constrain audio track " + e);
}

var biquadFilter = audioContext.createBiquadFilter();
// Create a compressor node
var compressor = audioContext.createDynamicsCompressor();
compressor.threshold.setValueAtTime(-50, audioContext.currentTime);
compressor.knee.setValueAtTime(40, audioContext.currentTime);
compressor.ratio.setValueAtTime(12, audioContext.currentTime);
compressor.attack.setValueAtTime(0, audioContext.currentTime);
compressor.release.setValueAtTime(0.25, audioContext.currentTime);

//biquadFilter.type = "lowpass";
if (isFinite(freqVal)) {
    biquadFilter.frequency.value = freqVal;
    console.log("frequency is " + freqVal);
}
if (isFinite(gainVal)) {
    biquadFilter.gain.value = gainVal;
    console.log("gain is " + gainVal);
}

source.connect(compressor);
compressor.connect(biquadFilter);
biquadFilter.connect(pitchShift);
pitchShift.connect(audioOutput);

const localAudioWarpedTracks = new Video.LocalAudioTrack(audioOutput.stream.getAudioTracks()[0]);
const audioElement2 = document.createElement("audio");
document.getElementById("audio_div").appendChild(audioElement2);
localAudioWarpedTracks.attach(audioElement2);

Multiple StreamingRecognizeRequest

I'm trying to set up a StreamingRecognize call with multiple requests. Is that possible?
The point is that I want to send an audio stream from the mic for an unknown amount of time, so I think I must implement multiple requests (considering that a request session has a max time of about 65 seconds).
Can anyone help me with this?
Thanks a lot ;)
Google sample code:
static async Task<object> StreamingMicRecognizeAsync(int seconds)
{
    if (NAudio.Wave.WaveIn.DeviceCount < 1)
    {
        Console.WriteLine("No microphone!");
        return -1;
    }
    var speech = SpeechClient.Create();
    var streamingCall = speech.StreamingRecognize();
    // Write the initial request with the config.
    await streamingCall.WriteAsync(
        new StreamingRecognizeRequest()
        {
            StreamingConfig = new StreamingRecognitionConfig()
            {
                Config = new RecognitionConfig()
                {
                    Encoding =
                        RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz = 16000,
                    LanguageCode = "en",
                },
                InterimResults = true,
            }
        });
    // Print responses as they arrive.
    Task printResponses = Task.Run(async () =>
    {
        while (await streamingCall.ResponseStream.MoveNext(
            default(CancellationToken)))
        {
            foreach (var result in streamingCall.ResponseStream
                .Current.Results)
            {
                foreach (var alternative in result.Alternatives)
                {
                    Console.WriteLine(alternative.Transcript);
                }
            }
        }
    });
    // Read from the microphone and stream to API.
    object writeLock = new object();
    bool writeMore = true;
    var waveIn = new NAudio.Wave.WaveInEvent();
    waveIn.DeviceNumber = 0;
    waveIn.WaveFormat = new NAudio.Wave.WaveFormat(16000, 1);
    waveIn.DataAvailable +=
        (object sender, NAudio.Wave.WaveInEventArgs args) =>
        {
            lock (writeLock)
            {
                if (!writeMore) return;
                streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                            .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
            }
        };
    waveIn.StartRecording();
    Console.WriteLine("Speak now.");
    await Task.Delay(TimeSpan.FromSeconds(seconds));
    // Stop recording and shut down.
    waveIn.StopRecording();
    lock (writeLock) writeMore = false;
    await streamingCall.WriteCompleteAsync();
    await printResponses;
    return 0;
}
In Cloud Speech-to-Text, the audio length limit for each streaming request is around 1 minute [1]. You can either use asynchronous speech recognition [2] for audio files of up to 180 minutes, or renew the streaming request before it reaches the time limit for streaming speech recognition [3].
Here is a Python example of how to renew the streaming request and stream audio for more than 1 minute [4].
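For reference, here is a minimal Node.js sketch of that renewal pattern using the @google-cloud/speech client (the question's sample is C#, but the idea carries over); the 55-second restart interval is an assumption chosen to stay safely under the per-stream limit:
const speech = require("@google-cloud/speech");
const client = new speech.SpeechClient();

const request = {
    config: {
        encoding: "LINEAR16",
        sampleRateHertz: 16000,
        languageCode: "en-US",
    },
    interimResults: true,
};

let recognizeStream = null;

function startStream() {
    recognizeStream = client
        .streamingRecognize(request)
        .on("error", console.error)
        .on("data", data => {
            const result = data.results[0];
            if (result && result.alternatives[0]) {
                console.log(result.alternatives[0].transcript);
            }
        });
    // End this stream and open a fresh one before the per-stream limit is hit.
    setTimeout(() => {
        recognizeStream.end();
        startStream();
    }, 55 * 1000);
}

startStream();
// Microphone audio chunks are then written with: recognizeStream.write(audioChunk);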

How to get webcam video feed in a firefox addon?

I am currently developing an addon where the requirement is to capture webcam video. I did some testing and noticed that navigator.mediaDevices.getUserMedia() is available within a panel, and so I have written the following content script for the panel to get the webcam video feed from the addon.
var mediastream;
var mediarecorder;

// Get the instance of mediaDevices object to use.
navigator.mediaDevices = navigator.mediaDevices || ((navigator.mozGetUserMedia || navigator.webkitGetUserMedia) ? {
    getUserMedia: function(c) {
        return new Promise(function(y, n) {
            (navigator.mozGetUserMedia ||
             navigator.webkitGetUserMedia).call(navigator, c, y, n);
        });
    }
} : null);

function startVideoCapture(width, height, framerate) {
    // Check if the browser supports video recording
    if (!navigator.mediaDevices) {
        return;
    }
    // Lets initialize the video settings for use for our video recording session
    var constraints = { audio: false, video: { width: width, height: height, frameRate: framerate } };
    // Make request to start video capture
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function(stream) {
            // Lets initialize the timestamp for this video
            var date = new Date();
            var milliseconds = "000" + date.getMilliseconds();
            var timestamp = date.toLocaleFormat("%Y-%m-%d %H:%M:%S.") + milliseconds.substr(-3);
            // Lets make the stream globally available so that we will be able to control it later.
            mediastream = stream;
            // Lets display the available stream in the video element available inside the panel.
            var video = document.querySelector('video');
            video.src = window.URL.createObjectURL(stream);
            video.onloadedmetadata = function(e) {
                video.play();
            };
            // We are not here to just show the video to screen. Lets get a media recorder to store the video into memory
            mediarecorder = new MediaRecorder(stream);
            // Lets decide what to do with the recorded video once we are done with the recording
            mediarecorder.ondataavailable = function(evt) {
                // recorded video will be available as a blob in evt.data object.
                // The only way to use it properly is through FileReader Object
                var reader = new FileReader();
                // Lets decide what we are going to do with the data that we will read from blob
                reader.onloadend = function() {
                    // create a video object containing the timestamp and the binary video string
                    var videoObject = new Object();
                    videoObject.timestamp = timestamp;
                    videoObject.video = reader.result;
                    // send the video to the main script for safe keeping
                    self.port.emit("videoAvailable", videoObject);
                };
                // instruct the FileReader to start reading the blob
                reader.readAsBinaryString(evt.data);
            };
            // Lets start the video capture
            mediarecorder.start();
        })
        .catch(function(err) {
            self.port.emit("VideoError", err);
        });
}

function stopVideoCapture() {
    if (mediarecorder !== undefined && mediarecorder !== null) {
        mediarecorder.stop();
    }
    if (mediastream !== undefined && mediastream !== null) {
        mediastream.stop();
    }
}

function updateVideoSettings(settings) {
    stopVideoCapture();
    startVideoCapture(settings.width, settings.height, settings.framerate);
}

self.port.on("VideoPreferenceUpdated", updateVideoSettings);

// Start video capture
startVideoCapture(self.options.width, self.options.height, self.options.framerate);
Now the problem is that the code works perfectly from a webpage, i.e. if I open the panel.html file directly in the browser with the self.options and self.port lines adjusted accordingly. But when I use the code in the content script for the panel in my addon, I get the following error:
JavaScript error: resource:///modules/webrtcUI.jsm, line 186: TypeError: stringBundle is undefined
Now that is an error from a built-in JSM module in Firefox. Is there a way I can get past that error, or any other way to get a webcam video feed in my addon?
Thanks

Distorted audio in iOS 7.1 with WebAudio API

On iOS 7.1, I keep getting a buzzing / noisy / distorted sound when playing back audio using the Web Audio API. It sounds distorted like this, instead of normal like this.
The same files are fine when using HTML5 audio. It all works fine on desktop (Firefox, Chrome, Safari).
EDIT:
The audio is distorted in the iOS Simulator on iOS 7.1, 8.1, and 8.2. The buzzing sound often starts before I even play anything back.
The audio is distorted on a physical iPhone running iOS 7.1, in both Chrome and Safari.
The audio is fine on a physical iPhone running iOS 8.1, in both Chrome and Safari.
i.e. the buzzing audio is on iOS 7.1 only.
Howler.js is not the issue. The problem is still there using pure JS like so:
var context;
var sound;
var extension = '.' + ( new Audio().canPlayType( 'audio/ogg' ) !== '' ? 'ogg' : 'mp3');

/** Test for WebAudio API support **/
try {
    // still needed for Safari
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    // create an AudioContext
    context = new AudioContext();
} catch(e) {
    // API not supported
    throw new Error( 'Web Audio API not supported.' );
}

function loadSound( url ) {
    var request = new XMLHttpRequest();
    request.open( 'GET', url, true );
    request.responseType = 'arraybuffer';
    request.onload = function() {
        // request.response is encoded... so decode it now
        context.decodeAudioData( request.response, function( buffer ) {
            sound = buffer;
        }, function( err ) {
            throw new Error( err );
        });
    };
    request.send();
}

function playSound(buffer) {
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(context.destination);
    source.start(0);
}

loadSound( '/tests/Assets/Audio/En-us-hello' + extension );

$(document).ready(function(){
    $( '#clickme' ).click( function( event ) {
        playSound(sound);
    });
}); /* END .ready() */
A live version of this code is available here: Web Audio API - Hello world
Google did not bring up any result about such a distorted sound issue on iOS 7.1.
Has anyone else run into it? Should I file a bug report to Apple?
I believe the issue is caused by the audioContext.sampleRate property being reset, which seems to happen after the browser/OS plays something recorded at a different sampling rate.
I've devised the following workaround, which silently plays a short wav file recorded at the sampling rate that the device currently plays back at:
"use strict";
var getData = function( context, filePath, callback ) {
var source = context.createBufferSource(),
request = new XMLHttpRequest();
request.open( "GET", filePath, true );
request.responseType = "arraybuffer";
request.onload = function() {
var audioData = request.response;
context.decodeAudioData(
audioData,
function( buffer ) {
source.buffer = buffer;
callback( source );
},
function( e ) {
console.log( "Error with decoding audio data" + e.err );
}
);
};
request.send();
};
module.exports = function() {
var AudioContext = window.AudioContext || window.webkitAudioContext,
context = new AudioContext();
getData(
context,
"path/to/short/file.wav",
function( bufferSource ) {
var gain = context.createGain();
gain.gain.value = 0;
bufferSource.connect( gain );
gain.connect( context.destination );
bufferSource.start( 0 );
}
);
};
Obviously, if some of the devices have different sampling rates, you would need to detect and use a specific file for every rate.
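For example, inside the exported function you could key the file off the rate the context reports and pass that to getData() instead of the fixed path (the per-rate file names are hypothetical):
// Pick a warm-up file matching the device's current playback rate.
var warmupFile = "path/to/short-" + context.sampleRate + ".wav"; // e.g. ...-44100.wav or ...-48000.wav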
It looks like iOS 6+ Safari defaults to a sample rate of 48000. If you type this into the developer console when you first open Mobile Safari, you'll get 48000:
var ctx = new window.webkitAudioContext();
console.log(ctx.sampleRate);
Further Reference: https://forums.developer.apple.com/thread/20677
Then, if you close the initial context on load with ctx.close(), the next created context will use the sample rate most other browsers use (44100), and sound will play without distortion.
Credit to this for pointing me in the right direction (and in case the above no longer works in the future): https://github.com/Jam3/ios-safe-audio-context/blob/master/index.js
The function, as of the post date:
function createAudioContext (desiredSampleRate) {
    var AudioCtor = window.AudioContext || window.webkitAudioContext

    desiredSampleRate = typeof desiredSampleRate === 'number'
        ? desiredSampleRate
        : 44100
    var context = new AudioCtor()

    // Check if hack is necessary. Only occurs in iOS6+ devices
    // and only when you first boot the iPhone, or play a audio/video
    // with a different sample rate
    if (/(iPhone|iPad)/i.test(navigator.userAgent) &&
        context.sampleRate !== desiredSampleRate) {
        var buffer = context.createBuffer(1, 1, desiredSampleRate)
        var dummy = context.createBufferSource()
        dummy.buffer = buffer
        dummy.connect(context.destination)
        dummy.start(0)
        dummy.disconnect()

        context.close() // dispose old context
        context = new AudioCtor()
    }
    return context
}
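Usage is then just a matter of creating the context through the helper (the argument is optional, since 44100 is the default):
var context = createAudioContext(44100); // returns a 44100 Hz context even on affected iOS devices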
