Get System Volume iOS

My case is simple:
I need to play a warning signal and want to make sure the user will hear it, so I want to check the system volume.
How can I find out what the current system volume is?

Update for Swift
let vol = AVAudioSession.sharedInstance().outputVolume
The audio session can provide output volume (iOS >= 6.0).
float vol = [[AVAudioSession sharedInstance] outputVolume];
NSLog(#"output volume: %1.2f dB", 20.f*log10f(vol+FLT_MIN));

Swift 3.1
let audioSession = AVAudioSession.sharedInstance()
var volume: Float?
do {
    try audioSession.setActive(true)
    volume = audioSession.outputVolume
} catch {
    print("Error setting up audio session: \(error)")
}
Note: the audioSession.setActive(true) call is important.
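The resulting optional can then be used like any other Float, for example (my own illustration, tied to the original question about making sure a warning is audible):
if let volume = volume, volume < 0.3 {
    print("Warning: system volume is low (\(Int(volume * 100))%)")
}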

Try this:
MPMusicPlayerController *iPod = [MPMusicPlayerController iPodMusicPlayer];
float volumeLevel = iPod.volume;
You need to import the MediaPlayer framework.

This works fine:
Float32 volume;
UInt32 dataSize = sizeof(Float32);
AudioSessionGetProperty (
kAudioSessionProperty_CurrentHardwareOutputVolume,
&dataSize,
&volume
);

For Swift 2:
let volume = AVAudioSession.sharedInstance().outputVolume
print("Output volume: \(volume)")

You can use the system's default volume view and add it wherever you need it. In my case I required it in my own music player. It's easy and hassle-free: just add the view and everything is done. This is explained in Apple's MPVolumeView class reference.
mpVolumeViewParentView.backgroundColor = [UIColor clearColor];
MPVolumeView *myVolumeView =
[[MPVolumeView alloc] initWithFrame: mpVolumeViewParentView.bounds];
[mpVolumeViewParentView addSubview: myVolumeView];
[myVolumeView release];
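For reference, a rough Swift equivalent of the snippet above (my own sketch; mpVolumeViewParentView stands in for whatever container view you use):
import UIKit
import MediaPlayer

mpVolumeViewParentView.backgroundColor = .clear
let myVolumeView = MPVolumeView(frame: mpVolumeViewParentView.bounds)
mpVolumeViewParentView.addSubview(myVolumeView)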

I have prepared a class with static methods to deal with the volume of iOS devices. Let me share it with you :)
import AVFoundation
class HeadPhoneDetectHelper {
    class func isHeadPhoneConnected() -> Bool
    {
        do {
            let audioSession = AVAudioSession.sharedInstance()
            try audioSession.setActive(true)
            let currentRoute = audioSession.currentRoute
            let headPhonePortDescriptionArray = currentRoute.outputs.filter { $0.portType == AVAudioSessionPortHeadphones }
            let isHeadPhoneConnected = headPhonePortDescriptionArray.count != 0
            return isHeadPhoneConnected
        } catch {
            print("Error while checking head phone connection : \(error)")
        }
        return false
    }

    class func isVolumeLevelAppropriate() -> Bool
    {
        let minimumVolumeLevelToAccept = 100
        let currentVolumeLevel = HeadPhoneDetectHelper.getVolumeLevelAsPercentage()
        let isVolumeLevelAppropriate = currentVolumeLevel >= minimumVolumeLevelToAccept
        return isVolumeLevelAppropriate
    }

    class func getVolumeLevelAsPercentage() -> Int
    {
        do {
            let audioSession = AVAudioSession.sharedInstance()
            try audioSession.setActive(true)
            let audioVolume = audioSession.outputVolume
            let audioVolumePercentage = audioVolume * 100
            return Int(audioVolumePercentage)
        } catch {
            print("Error while getting volume level \(error)")
        }
        return 0
    }
}
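Example usage of the helper above (my own illustration):
if !HeadPhoneDetectHelper.isHeadPhoneConnected() {
    print("No headphones connected")
}
if !HeadPhoneDetectHelper.isVolumeLevelAppropriate() {
    print("Volume too low: \(HeadPhoneDetectHelper.getVolumeLevelAsPercentage())%")
}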

Swift 2.2, make sure to import MediaPlayer
private func setupVolumeListener()
{
    let frameView: CGRect = CGRectMake(0, 0, 0, 0)
    let volumeView = MPVolumeView(frame: frameView)
    //self.window?.addSubview(volumeView) //use in app delegate
    self.view.addSubview(volumeView) //use in a view controller
    NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(volumeChanged(_:)), name: "AVSystemController_SystemVolumeDidChangeNotification", object: nil)
}//eom

func volumeChanged(notification: NSNotification)
{
    let volume = notification.userInfo!["AVSystemController_AudioVolumeNotificationParameter"]
    let category = notification.userInfo!["AVSystemController_AudioCategoryNotificationParameter"]
    let reason = notification.userInfo!["AVSystemController_AudioVolumeChangeReasonNotificationParameter"]
    print("volume: \(volume!)")
    print("category: \(category!)")
    print("reason: \(reason!)")
    print("\n")
}//eom
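On newer iOS and Swift versions, a KVO-based alternative avoids the undocumented notification name. A minimal sketch (my addition, assuming Swift 4's block-based KVO; keep a strong reference to the observation):
import AVFoundation

var volumeObservation: NSKeyValueObservation?

func observeVolume() {
    let session = AVAudioSession.sharedInstance()
    try? session.setActive(true)
    volumeObservation = session.observe(\.outputVolume, options: [.new]) { _, change in
        print("volume: \(change.newValue ?? 0)")
    }
}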

Related

AudioKit output changes to ear speakers

I implemented the AudioKit "MICROPHONE ANALYSIS" example https://audiokit.io/examples/MicrophoneAnalysis/ in my App.
I want to analyze the microphone input frequency and then play the correct note that is nearest to the detected frequency.
Normally the sound output is the speaker or a Bluetooth device connected to my iPhone, but after implementing the "MICROPHONE ANALYSIS" example the sound output changed to the tiny speaker at the top of the iPhone that is normally used when you take a call.
How can I switch to the "normal" speaker or to the connected Bluetooth device like before?
var mic: AKMicrophone!
var tracker: AKFrequencyTracker!
var silence: AKBooster!
func initFrequencyTracker() {
AKSettings.audioInputEnabled = true
mic = AKMicrophone()
tracker = AKFrequencyTracker(mic)
silence = AKBooster(tracker, gain: 0)
}
func deinitFrequencyTracker() {
plotTimer.invalidate()
do {
try AudioKit.stop()
AudioKit.output = nil
} catch {
print(error)
}
}
func initPlotTimer() {
AudioKit.output = silence
do {
try AudioKit.start()
} catch {
AKLog("AudioKit did not start!")
}
setupPlot()
plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}
func setupPlot() {
let plot = AKNodeOutputPlot(mic, frame: audioInputPlot.bounds)
plot.translatesAutoresizingMaskIntoConstraints = false
plot.alpha = 0.3
plot.plotType = .rolling
plot.shouldFill = true
plot.shouldCenterYAxis = false
plot.shouldMirror = true
plot.color = UIColor(named: uiFarbe)
audioInputPlot.addSubview(plot)
// Pin the AKNodeOutputPlot to the audioInputPlot
var constraints = [plot.leadingAnchor.constraint(equalTo: audioInputPlot.leadingAnchor)]
constraints.append(plot.trailingAnchor.constraint(equalTo: audioInputPlot.trailingAnchor))
constraints.append(plot.topAnchor.constraint(equalTo: audioInputPlot.topAnchor))
constraints.append(plot.bottomAnchor.constraint(equalTo: audioInputPlot.bottomAnchor))
constraints.forEach { $0.isActive = true }
}
@objc func updatePlotUI() {
if tracker.amplitude > 0.1 {
let trackerFrequency = Float(tracker.frequency)
guard trackerFrequency < 7_000 else {
// This is a bit of hack because of modern Macbooks giving super high frequencies
return
}
var frequency = trackerFrequency
while frequency > Float(noteFrequencies[noteFrequencies.count - 1]) {
frequency /= 2.0
}
while frequency < Float(noteFrequencies[0]) {
frequency *= 2.0
}
var minDistance: Float = 10_000.0
var index = 0
for i in 0..<noteFrequencies.count {
let distance = fabsf(Float(noteFrequencies[i]) - frequency)
if distance < minDistance {
index = i
minDistance = distance
}
}
// let octave = Int(log2f(trackerFrequency / frequency))
frequencyLabel.text = String(format: "%0.1f", tracker.frequency)
if frequencyTranspose(note: notesToTanspose[index]) != droneLabel.text {
note = frequencyTranspose(note: notesToTanspose[index])
droneLabel.text = note
DispatchQueue.main.asyncAfter(deadline: .now() + 0.03, execute: {
self.prepareSinglePlayerFirstForStart(note: self.note)
self.startSinglePlayer()
})
}
}
}
func frequencyTranspose(note: String) -> String {
var indexNote = notesToTanspose.firstIndex(of: note)!
let chosenInstrument = UserDefaults.standard.object(forKey: "whichInstrument") as! String
if chosenInstrument == "Bb" {
if indexNote + 2 >= notesToTanspose.count {
indexNote -= 12
}
return notesToTanspose[indexNote + 2]
} else if chosenInstrument == "Eb" {
if indexNote - 3 < 0 {
indexNote += 12
}
return notesToTanspose[indexNote - 3]
} else {
return note
}
}
It's good practice to control the session settings, so start by creating a method in your application that takes care of that during initialisation.
Following up, here's an example where I set a category and the desired options:
func start() {
do {
let session = AVAudioSession.sharedInstance()
try session.setCategory(.playAndRecord, options: .defaultToSpeaker)
try session.setActive(true, options: .notifyOthersOnDeactivation)
try session.overrideOutputAudioPort(AVAudioSession.PortOverride.speaker)
try AudioKit.start()
} catch {
// your error handler
}
}
You can call this start method where you currently call AudioKit.start() in initPlotTimer (see the sketch further below).
The example above is using the AVAudioSession, which I believe is what AKSettings wraps (please feel free to edit my answer to not mislead future readers, as I'm not looking at the AudioKit source-code at the moment).
Now that AVAudioSession is exposed, let's stick with the method offered by AudioKit since that's what you're dealing with.
Here's another example using AKSettings:
func start() {
do {
AKSettings.channelCount = 2
AKSettings.ioBufferDuration = 0.002
AKSettings.audioInputEnabled = true
AKSettings.bufferLength = .medium
AKSettings.defaultToSpeaker = true
// check docs for other options and settings
try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker, .allowBluetooth])
try AudioKit.start()
} catch {
// your handler
}
}
Keep in mind that you don't necessarily have to call it start, or run AudioKit's start method there; I'm just spelling out the initialisation phase to make it readable for you and for other use cases.
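For instance, a minimal sketch of that wiring, using initPlotTimer from the question (the call order is the main point; adapt the names to your own code):
func initPlotTimer() {
    AudioKit.output = silence
    start()   // sets the session category/options, then calls AudioKit.start()
    setupPlot()
    plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}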
Reference:
https://developer.apple.com/documentation/avfoundation/avaudiosession/categoryoptions
https://audiokit.io/docs/Classes/AKSettings.html

MTAudioProcessingTap EXC_BAD_ACCESS, doesn't always fire the finalize callback. How do I release it?

I'm trying to implement MTAudioProcessingTap and it works great. The problem is when I'm done using the tap and I re-instantiate my class and create a new tap.
How I'm supposedly releasing the tap:
1- I retain the tap as a property when it is created, hoping I can access it and release it later.
2- In the deinit() method of the class, I set the audioMix to nil and try to do a self.tap?.release().
The thing is, sometimes it works and calls the FINALIZE callback and everything is great, and sometimes it doesn't and just crashes at this line of the tapProcess callback:
let selfMediaInput = Unmanaged<VideoMediaInput>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
Here's the full code: https://gist.github.com/omarojo/03d08165a1a7962cb30c17ec01f809a3
import Foundation
import UIKit
import AVFoundation;
import MediaToolbox
protocol VideoMediaInputDelegate: class {
func videoFrameRefresh(sampleBuffer: CMSampleBuffer) //could be audio or video
}
class VideoMediaInput: NSObject {
private let queue = DispatchQueue(label: "com.GenerateMetal.VideoMediaInput")
var videoURL: URL!
weak var delegate: VideoMediaInputDelegate?
private var playerItemObserver: NSKeyValueObservation?
var displayLink: CADisplayLink!
var player = AVPlayer()
var playerItem: AVPlayerItem!
let videoOutput = AVPlayerItemVideoOutput(pixelBufferAttributes: [String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32BGRA)])
var audioProcessingFormat: AudioStreamBasicDescription?//UnsafePointer<AudioStreamBasicDescription>?
var tap: Unmanaged<MTAudioProcessingTap>?
override init(){
}
convenience init(url: URL){
self.init()
self.videoURL = url
self.playerItem = AVPlayerItem(url: url)
playerItemObserver = playerItem.observe(\.status) { [weak self] item, _ in
guard item.status == .readyToPlay else { return }
self?.playerItemObserver = nil
self?.player.play()
}
setupProcessingTap()
player.replaceCurrentItem(with: playerItem)
player.currentItem!.add(videoOutput)
NotificationCenter.default.removeObserver(self)
NotificationCenter.default.addObserver(forName: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object: nil, queue: nil) {[weak self] notification in
if let weakSelf = self {
/*
Setting actionAtItemEnd to None prevents the movie from getting paused at item end. A very simplistic, and not gapless, looped playback.
*/
weakSelf.player.actionAtItemEnd = .none
weakSelf.player.seek(to: CMTime.zero)
weakSelf.player.play()
}
}
NotificationCenter.default.addObserver(
self,
selector: #selector(applicationDidBecomeActive(_:)),
name: UIApplication.didBecomeActiveNotification,
object: nil)
}
func stopAllProcesses(){
self.queue.sync {
self.player.pause()
self.player.isMuted = true
self.player.currentItem?.audioMix = nil
self.playerItem.audioMix = nil
self.playerItem = nil
self.tap?.release()
}
}
deinit{
print(">> VideoInput deinited !!!! 📌📌")
if let link = self.displayLink {
link.invalidate()
}
NotificationCenter.default.removeObserver(self)
stopAllProcesses()
}
public func playVideo(){
if (player.currentItem != nil) {
print("Starting playback!")
player.play()
}
}
public func pauseVideo(){
if (player.currentItem != nil) {
print("Pausing playback!")
player.pause()
}
}
@objc func applicationDidBecomeActive(_ notification: NSNotification) {
playVideo()
}
//MARK: GET AUDIO BUFFERS
func setupProcessingTap(){
var callbacks = MTAudioProcessingTapCallbacks(
version: kMTAudioProcessingTapCallbacksVersion_0,
clientInfo: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque()),
init: tapInit,
finalize: tapFinalize,
prepare: tapPrepare,
unprepare: tapUnprepare,
process: tapProcess)
var tap: Unmanaged<MTAudioProcessingTap>?
let err = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks, kMTAudioProcessingTapCreationFlag_PostEffects, &tap)
self.tap = tap
print("err: \(err)\n")
if err == noErr {
}
print("tracks? \(playerItem.asset.tracks)\n")
let audioTrack = playerItem.asset.tracks(withMediaType: AVMediaType.audio).first!
let inputParams = AVMutableAudioMixInputParameters(track: audioTrack)
inputParams.audioTapProcessor = tap?.takeRetainedValue()//tap?.takeUnretainedValue()
// tap?.release()
// print("inputParms: \(inputParams), \(inputParams.audioTapProcessor)\n")
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = [inputParams]
playerItem.audioMix = audioMix
}
//MARK: TAP CALLBACKS
let tapInit: MTAudioProcessingTapInitCallback = {
(tap, clientInfo, tapStorageOut) in
tapStorageOut.pointee = clientInfo
print("init \(tap, clientInfo, tapStorageOut)\n")
}
let tapFinalize: MTAudioProcessingTapFinalizeCallback = {
(tap) in
print("finalize \(tap)\n")
}
let tapPrepare: MTAudioProcessingTapPrepareCallback = {
(tap, itemCount, basicDescription) in
print("prepare: \(tap, itemCount, basicDescription)\n")
let selfMediaInput = Unmanaged<VideoMediaInput>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
selfMediaInput.audioProcessingFormat = AudioStreamBasicDescription(mSampleRate: basicDescription.pointee.mSampleRate,
mFormatID: basicDescription.pointee.mFormatID, mFormatFlags: basicDescription.pointee.mFormatFlags, mBytesPerPacket: basicDescription.pointee.mBytesPerPacket, mFramesPerPacket: basicDescription.pointee.mFramesPerPacket, mBytesPerFrame: basicDescription.pointee.mBytesPerFrame, mChannelsPerFrame: basicDescription.pointee.mChannelsPerFrame, mBitsPerChannel: basicDescription.pointee.mBitsPerChannel, mReserved: basicDescription.pointee.mReserved)
}
let tapUnprepare: MTAudioProcessingTapUnprepareCallback = {
(tap) in
print("unprepare \(tap)\n")
}
let tapProcess: MTAudioProcessingTapProcessCallback = {
(tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut) in
print("callback \(bufferListInOut)\n")
let selfMediaInput = Unmanaged<VideoMediaInput>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
let status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, nil, numberFramesOut)
//print("get audio: \(status)\n")
if status != noErr {
print("Error TAPGetSourceAudio :\(String(describing: status.description))")
return
}
selfMediaInput.processAudioData(audioData: bufferListInOut, framesNumber: UInt32(numberFrames))
}
func processAudioData(audioData: UnsafeMutablePointer<AudioBufferList>, framesNumber: UInt32) {
var sbuf: CMSampleBuffer?
var status : OSStatus?
var format: CMFormatDescription?
//FORMAT
// var audioFormat = self.audioProcessingFormat//self.audioProcessingFormat?.pointee
guard var audioFormat = self.audioProcessingFormat else {
return
}
status = CMAudioFormatDescriptionCreate(allocator: kCFAllocatorDefault, asbd: &audioFormat, layoutSize: 0, layout: nil, magicCookieSize: 0, magicCookie: nil, extensions: nil, formatDescriptionOut: &format)
if status != noErr {
print("Error CMAudioFormatDescriptionCreater :\(String(describing: status?.description))")
return
}
print(">> Audio Buffer mSampleRate:\(Int32(audioFormat.mSampleRate))")
var timing = CMSampleTimingInfo(duration: CMTimeMake(value: 1, timescale: Int32(audioFormat.mSampleRate)), presentationTimeStamp: self.player.currentTime(), decodeTimeStamp: CMTime.invalid)
status = CMSampleBufferCreate(allocator: kCFAllocatorDefault,
dataBuffer: nil,
dataReady: Bool(truncating: 0),
makeDataReadyCallback: nil,
refcon: nil,
formatDescription: format,
sampleCount: CMItemCount(framesNumber),
sampleTimingEntryCount: 1,
sampleTimingArray: &timing,
sampleSizeEntryCount: 0, sampleSizeArray: nil,
sampleBufferOut: &sbuf);
if status != noErr {
print("Error CMSampleBufferCreate :\(String(describing: status?.description))")
return
}
status = CMSampleBufferSetDataBufferFromAudioBufferList(sbuf!,
blockBufferAllocator: kCFAllocatorDefault ,
blockBufferMemoryAllocator: kCFAllocatorDefault,
flags: 0,
bufferList: audioData)
if status != noErr {
print("Error cCMSampleBufferSetDataBufferFromAudioBufferList :\(String(describing: status?.description))")
return
}
let currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sbuf!);
print(" audio buffer at time: \(currentSampleTime)")
self.delegate?.videoFrameRefresh(sampleBuffer: sbuf!)
}
}
How I use my class
self.inputVideoMedia = nil
self.inputVideoMedia = VideoMediaInput(url: videoURL)
self.inputVideoMedia!.delegate = self
The second time I do that, it crashes (but not always). The times it doesn't crash, I can see the FINALIZE print in the console.
If VideoMediaInput is deallocated before the tap is deallocated (which can happen as there seems to be no way to synchronously stop a tap), then the tap callback can choke on a reference to your deallocated class.
You can fix this by passing (a wrapped, I guess) weak reference to your class. You can do it like this:
First delete your tap instance variable, and any references to it - it's not needed. Then make these changes:
class VideoMediaInput: NSObject {
class TapCookie {
weak var input: VideoMediaInput?
deinit {
print("TapCookie deinit")
}
}
...
func setupProcessingTap(){
let cookie = TapCookie()
cookie.input = self
var callbacks = MTAudioProcessingTapCallbacks(
version: kMTAudioProcessingTapCallbacksVersion_0,
clientInfo: UnsafeMutableRawPointer(Unmanaged.passRetained(cookie).toOpaque()),
init: tapInit,
finalize: tapFinalize,
prepare: tapPrepare,
unprepare: tapUnprepare,
process: tapProcess)
...
let tapFinalize: MTAudioProcessingTapFinalizeCallback = {
(tap) in
print("finalize \(tap)\n")
// release cookie
Unmanaged<TapCookie>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).release()
}
let tapPrepare: MTAudioProcessingTapPrepareCallback = {
(tap, itemCount, basicDescription) in
print("prepare: \(tap, itemCount, basicDescription)\n")
let cookie = Unmanaged<TapCookie>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
let selfMediaInput = cookie.input!
...
let tapProcess: MTAudioProcessingTapProcessCallback = {
(tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut) in
print("callback \(bufferListInOut)\n")
let cookie = Unmanaged<TapCookie>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
guard let selfMediaInput = cookie.input else {
print("Tap callback: VideoMediaInput was deallocated!")
return
}
...
I'm not sure if the cookie class is necessary, it exists only to wrap the weak reference. Cutting edge Swift experts may know how to mash the weakness through all the teenage mutant ninja raw pointers, but I don't.
The audio context runs in its own real-time thread. So audio processes don't stop synchronously with a stop or cancel function call, but some unknown time later (on the order of the duration of some number of audio samples in some internal audio buffers), after the real-time thread drains.
Thus, audio buffers, objects, and callbacks should not be released (or reallocated) until some (unknown, but less than a couple seconds) time after stopping any real-time audio stream.
Depending on object deallocation messages or instance variable states (including weak references) between real-time threads is reported to be currently unsafe in Swift (see the WWDC 2018 session on audio). Thus, I recommend using semaphores (outside of a real-time context, such as audio), or POSIX memory barriers (inside a bridged call to a C function), at least until some future version of Swift provides a real-time concurrency mechanism, and especially on iOS or Apple Silicon (M1) devices, which can reorder memory writes.
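As a rough illustration of that advice (my own sketch, not the answer's code; the two-second delay is an arbitrary assumption), detach the audio mix first and only tear the rest down after the real-time thread has had time to drain:
import AVFoundation

func stopTap(on playerItem: AVPlayerItem, completion: @escaping () -> Void) {
    playerItem.audioMix = nil   // stop feeding the tap new buffers
    // Give the real-time audio thread time to drain before releasing anything
    // the tap callbacks might still touch.
    DispatchQueue.global().asyncAfter(deadline: .now() + 2.0) {
        completion()   // release tap-related buffers/objects here
    }
}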

Unable to play a very simple tone

I was following this question, but the tone that I try to play with an AVAudioPCMBuffer does not play. The code is pretty simple:
class Player: NSObject {
var engine = AVAudioEngine()
var player = AVAudioPlayerNode()
var mixer: AVAudioMixerNode!
var buffer: AVAudioPCMBuffer!
override init() {
mixer = engine.mainMixerNode
buffer = AVAudioPCMBuffer(pcmFormat: player.outputFormat(forBus: 0), frameCapacity: 100)
buffer.frameLength = 100
let sr = mixer.outputFormat(forBus: 0).sampleRate
let nChannels = mixer.outputFormat(forBus: 0).channelCount
var i = 0
while i < Int(buffer.frameLength) {
let val = sin(441 * Double(i) * Double.pi / sr)
buffer.floatChannelData?.pointee[i] = Float(val * 0.5)
i += Int(nChannels)
}
engine.attach(player)
engine.connect(player, to: mixer, format: player.outputFormat(forBus: 0))
engine.prepare()
}
func play() {
do {
try engine.start()
} catch {
print(error)
}
player.scheduleBuffer(buffer, at: nil, options: .loops) {
print("Played!")
}
player.play()
}
}
For some reason, though, the iPhone does not make any sound. In my ViewController, I have this:
class ViewController: UIViewController {
var player = Player()
override func viewDidAppear(_ animated: Bool) {
player.play()
}
}
As you can see, player is a class variable, so it should not be deallocated from memory.
When I run the app on my physical device (iPhone 6s iOS 11), it does not work, yet it does work on the simulator. Why is this not making any sound, and how can I fix it?
Thanks in advance!
Make sure your device is not on silent mode.
I just created a project that you can download for testing by yourself: https://github.com/mugx/TestSound
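If the device is not on silent but you still hear nothing, or you need the tone to play even with the ring/silent switch on, the audio session category matters. A minimal sketch (my addition, not part of the answer; uses the iOS 10+ API), to run before starting the engine:
import AVFoundation

func configurePlaybackSession() {
    do {
        try AVAudioSession.sharedInstance().setCategory(.playback, mode: .default)
        try AVAudioSession.sharedInstance().setActive(true)
    } catch {
        print("Audio session error: \(error)")
    }
}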

Allowing background audio with Swift not working

I want to allow background audio while the app is not in focus. I currently have this code, which should allow that:
do {
try AKSettings.setSession(category: .playback, with: .mixWithOthers)
} catch {
print("error")
}
AKSettings.playbackWhileMuted = true
I also have the setting 'Audio, Airplay and Picture in Picture' enabled in capabilities settings. However, when I press the home button on my device the audio doesn't keep playing. What am I doing wrong? I am using AudioKit to produce sounds if that matters.
I am using a singleton to house all of the AudioKit components which I named AudioPlayer.swift. Here is what I have in my AudioPlayer.swift singleton file:
class AudioPlayer: NSObject {
var currentFrequency = String()
var soundIsPlaying = false
var leftOscillator = AKOscillator()
var rightOscillator = AKOscillator()
var rain = try! AKAudioFile()
var rainPlayer: AKAudioPlayer!
var envelope = AKAmplitudeEnvelope()
override init() {
super.init()
do {
try AKSettings.setSession(category: .playback, with: .mixWithOthers)
} catch {
print("error")
}
AKSettings.playbackWhileMuted = true
AudioKit.output = envelope
AudioKit.start()
}
func setupFrequency(left: AKOscillator, right: AKOscillator, frequency: String) {
currentFrequency = frequency
leftOscillator = left
rightOscillator = right
let leftPanner = AKPanner(leftOscillator)
leftPanner.pan = -1
let rightPanner = AKPanner(rightOscillator)
rightPanner.pan = 1
//Set up rain and rainPlayer
do {
rain = try AKAudioFile(readFileName: "rain.wav")
rainPlayer = try AKAudioPlayer(file: rain, looping: true, deferBuffering: false, completionHandler: nil)
} catch { print(error) }
let mixer = AKMixer(leftPanner, rightPanner, rainPlayer)
//Put mixer in sound envelope
envelope = AKAmplitudeEnvelope(mixer)
envelope.attackDuration = 2.0
envelope.decayDuration = 0
envelope.sustainLevel = 1
envelope.releaseDuration = 0.2
//Start AudioKit stuff
AudioKit.output = envelope
AudioKit.start()
leftOscillator.start()
rightOscillator.start()
rainPlayer.start()
envelope.start()
soundIsPlaying = true
}
}
And here is an example of one of my sound effect view controllers, which reference the AudioKit singleton to send it a certain frequency (I have about a dozen of these view controllers, each with its own frequency settings):
class CalmView: UIViewController {
let leftOscillator = AKOscillator()
let rightOscillator = AKOscillator()
override func viewDidLoad() {
super.viewDidLoad()
leftOscillator.amplitude = 0.3
leftOscillator.frequency = 220
rightOscillator.amplitude = 0.3
rightOscillator.frequency = 230
}
@IBAction func playSound(_ sender: Any) {
if shared.soundIsPlaying == false {
AudioKit.stop()
shared.setupFrequency(left: leftOscillator, right: rightOscillator, frequency: "Calm")
} else if shared.soundIsPlaying == true && shared.currentFrequency != "Calm" {
AudioKit.stop()
shared.leftOscillator.stop()
shared.rightOscillator.stop()
shared.rainPlayer.stop()
shared.envelope.stop()
shared.setupFrequency(left: leftOscillator, right: rightOscillator, frequency: "Calm")
} else {
shared.soundIsPlaying = false
shared.envelope.stop()
}
}
}
I instantiated the AudioPlayer singleton in my ViewController.swift file.
It depends on when you do your configuration relative to when AudioKit is started. If you're using AudioKit, you should be using its AKSettings to manage your session category; that means not only the playback category but also the mixWithOthers option. By default, setSession does this:
/// Set the audio session type
@objc open static func setSession(category: SessionCategory,
with options: AVAudioSessionCategoryOptions = [.mixWithOthers]) throws {
So you'd do something like this in your ViewController:
do {
if #available(iOS 10.0, *) {
try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker, .allowBluetooth, .allowBluetoothA2DP])
} else {
// Fallback on earlier versions
}
} catch {
print("Errored setting category.")
}
So I think it's a matter of getting that straight. It might also help to have Inter-App Audio set up. If you still have trouble and can provide more information, I can help more, but this is as good an answer as I can muster based on the info you've given so far.

How to disable audio in a WebRTC mobile app (iOS) without changing the framework

I am working with WebRTC on mobile (iOS). I can't disable audio in WebRTC (iOS); I haven't found any flag to disable audio. By changing the framework/library it can be done easily, but my requirement is to disable audio without changing the framework/library. Can anyone help me?
Please update your question with a code snippet showing how you are creating the media stream or tracks (audio/video).
Generally, with the default native WebRTC framework:
RTCMediaStream localStream = [_factory mediaStreamWithStreamId:kARDMediaStreamId];
if(audioRequired) {
RTCAudioTrack *aTrack = [_lmStream createLocalAudioTrack];
[localStream addAudioTrack:aTrack];
}
RTCVideoTrack *vTrack = [_lmStream createLocalVideoTrack];
[localStream addVideoTrack:vTrack];
[_peerConnection addStream:localStream];
If you want to mute the audio during the call, use the function below.
- (void)enableAudio:(NSString *)id isAudioEnabled:(BOOL) isAudioEnabled {
NSLog(#"Auido enabled: %d streams count:%d ", id, isAudioEnabled, _peerConnection.localStreams.count);
if(_peerConnection.localStreams.count > 0) {
RTCMediaStream *lStream = _peerConnection.localStreams[0];
if(lStream.audioTracks.count > 0) { // Usually we will have only one track. If you have more than one, need to traverse all.
// isAudioEnabled == 1 -> Unmute
// isAudioEnabled == 0 -> Mute
[lStream.audioTracks[0] setIsEnabled:isAudioEnabled];
}
}
}
In my case I didn't use streams and instead added the audio track directly to the peer connection.
private func createMediaSenders() {
let streamId = "stream"
// Audio
let audioTrack = self.createAudioTrack()
self.pc.add(audioTrack, streamIds: [streamId])
// Video
/* let videoTrack = self.createVideoTrack()
self.localVideoTrack = videoTrack
self.peerConnection.add(videoTrack, streamIds: [streamId])
self.remoteVideoTrack = self.peerConnection.transceivers.first { $0.mediaType == .video }?.receiver.track as? RTCVideoTrack
// Data
if let dataChannel = createDataChannel() {
dataChannel.delegate = self
self.localDataChannel = dataChannel
}*/
}
private func createAudioTrack() -> RTCAudioTrack {
let audioConstrains = RTCMediaConstraints(mandatoryConstraints: nil, optionalConstraints: nil)
let audioSource = sessionFactory.audioSource(with: audioConstrains)
let audioTrack = sessionFactory.audioTrack(with: audioSource, trackId: "audio0")
return audioTrack
}
To mute and unmute the microphone I use this function (note that isEnabled needs to be the inverse of mute):
public func muteMicrophone(_ mute: Bool) {
    for sender in pc.senders {
        if sender.track?.kind == "audio" {
            sender.track?.isEnabled = !mute   // disabling the track mutes it; enabling unmutes
        }
    }
}
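Example usage (my own illustration, assuming the method lives on your WebRTC client class):
muteMicrophone(true)    // microphone muted
muteMicrophone(false)   // microphone active again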
