How to disable audio in a WebRTC mobile app (iOS) without changing the framework

I am working with WebRTC on mobile (iOS). I can't disable audio in WebRTC (iOS); I haven't found any flag to disable it. By changing the framework/library it can be done easily, but my requirement is to disable audio without changing the framework/library. Can anyone help me?

Update your question with a code snippet showing how you are creating the media stream or the tracks (audio/video).
Generally, with the default native WebRTC framework:
RTCMediaStream *localStream = [_factory mediaStreamWithStreamId:kARDMediaStreamId];
if (audioRequired) {
    RTCAudioTrack *aTrack = [_lmStream createLocalAudioTrack];
    [localStream addAudioTrack:aTrack];
}
RTCVideoTrack *vTrack = [_lmStream createLocalVideoTrack];
[localStream addVideoTrack:vTrack];
[_peerConnection addStream:localStream];
If you want to mute the audio during the call, use the function below.
- (void)enableAudio:(NSString *)id isAudioEnabled:(BOOL)isAudioEnabled {
    NSLog(@"Audio enabled (%@): %d, streams count: %lu", id, isAudioEnabled, (unsigned long)_peerConnection.localStreams.count);
    if (_peerConnection.localStreams.count > 0) {
        RTCMediaStream *lStream = _peerConnection.localStreams[0];
        // Usually we will have only one audio track. If you have more than one, traverse all of them.
        if (lStream.audioTracks.count > 0) {
            // isAudioEnabled == YES -> unmute
            // isAudioEnabled == NO  -> mute
            [lStream.audioTracks[0] setIsEnabled:isAudioEnabled];
        }
    }
}
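If the goal is to disable audio for the whole session rather than mute it mid-call, a minimal Swift sketch is to skip creating the audio track entirely and to tell the offer not to negotiate incoming audio. This is not an official "disable audio" flag of the framework; it just uses the standard offer constraint keys, and the helper name below is an assumption, not part of the SDK.
import WebRTC
// Sketch only: `peerConnection` is assumed to be the RTCPeerConnection you already create
// elsewhere; do not create or add any RTCAudioTrack, so no microphone audio is captured or sent.
func createAudioFreeOffer(peerConnection: RTCPeerConnection) {
    let constraints = RTCMediaConstraints(
        mandatoryConstraints: [
            "OfferToReceiveAudio": "false",  // do not negotiate an incoming audio stream
            "OfferToReceiveVideo": "true"
        ],
        optionalConstraints: nil)
    peerConnection.offer(for: constraints) { sdp, error in
        guard let sdp = sdp, error == nil else { return }
        peerConnection.setLocalDescription(sdp) { _ in
            // send `sdp` to the remote peer over your signalling channel
        }
    }
}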

In my case I didn't use streams; I add the audio track directly to the peer connection.
private func createMediaSenders() {
    let streamId = "stream"
    // Audio
    let audioTrack = self.createAudioTrack()
    self.pc.add(audioTrack, streamIds: [streamId])
    // Video
    /* let videoTrack = self.createVideoTrack()
    self.localVideoTrack = videoTrack
    self.peerConnection.add(videoTrack, streamIds: [streamId])
    self.remoteVideoTrack = self.peerConnection.transceivers.first { $0.mediaType == .video }?.receiver.track as? RTCVideoTrack
    // Data
    if let dataChannel = createDataChannel() {
        dataChannel.delegate = self
        self.localDataChannel = dataChannel
    } */
}

private func createAudioTrack() -> RTCAudioTrack {
    let audioConstraints = RTCMediaConstraints(mandatoryConstraints: nil, optionalConstraints: nil)
    let audioSource = sessionFactory.audioSource(with: audioConstraints)
    let audioTrack = sessionFactory.audioTrack(with: audioSource, trackId: "audio0")
    return audioTrack
}
To mute and unmute the microphone I use this function (note that the flag has to be inverted: a muted track is a disabled track):
public func muteMicrophone(_ mute: Bool) {
    for sender in pc.senders {
        if sender.track?.kind == "audio" {
            // A track is muted when it is *not* enabled.
            sender.track?.isEnabled = !mute
        }
    }
}
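A quick usage sketch, where webRTCClient is a placeholder for whatever object owns pc and the function above:
webRTCClient.muteMicrophone(true)   // stop sending audio
webRTCClient.muteMicrophone(false)  // resume sending audio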

Related

Real-time AVAssetWriter: synchronising audio and video when pausing/resuming

I am trying to record a video with sound using the iPhone's front camera. As I also need to support pause/resume functionality, I need to use AVAssetWriter. I found an example online, written in Objective-C, which almost achieves the desired functionality (http://www.gdcl.co.uk/2013/02/20/iPhone-Pause.html).
Unfortunately, after converting this example to Swift, I noticed that if I pause/resume, at the end of each "section" there is a small but noticeable period during which the video is just a still frame while the audio keeps playing. So it seems that when isPaused is triggered, the recorded audio track ends up longer than the recorded video track.
Sorry if it may seem like a noob question, but I am not a great expert in AVFoundation and some help would be appreciated!
Below I post my implementation of didOutput sampleBuffer.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    var isVideo = true
    if videoConntection != connection {
        isVideo = false
    }
    if !isCapturing || isPaused {
        return
    }
    if encoder == nil {
        // Wait for the first audio buffer so the channel count and sample rate can be read.
        if isVideo {
            return
        }
        if let fmt = CMSampleBufferGetFormatDescription(sampleBuffer) {
            let desc = CMAudioFormatDescriptionGetStreamBasicDescription(fmt as CMAudioFormatDescription)
            if let chan = desc?.pointee.mChannelsPerFrame, let rate = desc?.pointee.mSampleRate {
                let path = tempPath()!
                encoder = VideoEncoder(path: path, height: Int(cameraSize.height), width: Int(cameraSize.width), channels: chan, rate: rate)
            }
        }
    }
    if discont {
        // First buffer after resuming: compute the time offset introduced by the pause.
        if isVideo {
            return
        }
        discont = false
        var pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        let last = lastAudio
        if last.flags.contains(CMTimeFlags.valid) {
            if cmOffset.flags.contains(CMTimeFlags.valid) {
                pts = CMTimeSubtract(pts, cmOffset)
            }
            let off = CMTimeSubtract(pts, last)
            print("setting offset from \(isVideo ? "video" : "audio")")
            print("adding \(CMTimeGetSeconds(off)) to \(CMTimeGetSeconds(cmOffset)) (pts \(CMTimeGetSeconds(pts)))")
            if cmOffset.value == 0 {
                cmOffset = off
            } else {
                cmOffset = CMTimeAdd(cmOffset, off)
            }
        }
        lastVideo.flags = []
        lastAudio.flags = []
        return
    }
    var out: CMSampleBuffer?
    if cmOffset.value > 0 {
        // Shift the buffer's timestamps back by the accumulated pause offset.
        var count: CMItemCount = CMSampleBufferGetNumSamples(sampleBuffer)
        let pInfo = UnsafeMutablePointer<CMSampleTimingInfo>.allocate(capacity: count)
        CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, entryCount: count, arrayToFill: pInfo, entriesNeededOut: &count)
        var i = 0
        while i < count {
            pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, cmOffset)
            pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, cmOffset)
            i += 1
        }
        CMSampleBufferCreateCopyWithNewTiming(allocator: nil, sampleBuffer: sampleBuffer, sampleTimingEntryCount: count, sampleTimingArray: pInfo, sampleBufferOut: &out)
        pInfo.deallocate() // the timing array is copied into the new buffer, so it can be freed here
    } else {
        out = sampleBuffer
    }
    var pts = CMSampleBufferGetPresentationTimeStamp(out!)
    let dur = CMSampleBufferGetDuration(out!)
    if dur.value > 0 {
        pts = CMTimeAdd(pts, dur)
    }
    if isVideo {
        lastVideo = pts
    } else {
        lastAudio = pts
    }
    _ = encoder?.encodeFrame(sampleBuffer: out!, isVideo: isVideo)
}
And this is my VideoEncoder class:
final class VideoEncoder {
    var writer: AVAssetWriter
    var videoInput: AVAssetWriterInput
    var audioInput: AVAssetWriterInput
    var path: String

    init(path: String, height: Int, width: Int, channels: UInt32, rate: Float64) {
        self.path = path
        if FileManager.default.fileExists(atPath: path) {
            try? FileManager.default.removeItem(atPath: path)
        }
        let url = URL(fileURLWithPath: path)
        writer = try! AVAssetWriter(outputURL: url, fileType: .mp4)
        videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: height,
            AVVideoHeightKey: width
        ])
        videoInput.expectsMediaDataInRealTime = true
        writer.add(videoInput)
        audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVNumberOfChannelsKey: channels,
            AVSampleRateKey: rate
        ])
        audioInput.expectsMediaDataInRealTime = true
        writer.add(audioInput)
    }

    func finish(with completionHandler: @escaping () -> Void) {
        writer.finishWriting(completionHandler: completionHandler)
    }

    func encodeFrame(sampleBuffer: CMSampleBuffer, isVideo: Bool) -> Bool {
        if CMSampleBufferDataIsReady(sampleBuffer) {
            if writer.status == .unknown {
                writer.startWriting()
                writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
            }
            if writer.status == .failed {
                QFLogger.shared.addLog(format: "[ERROR initiating AVAssetWriter]", args: [], error: writer.error)
                return false
            }
            if isVideo {
                if videoInput.isReadyForMoreMediaData {
                    videoInput.append(sampleBuffer)
                    return true
                }
            } else {
                if audioInput.isReadyForMoreMediaData {
                    audioInput.append(sampleBuffer)
                    return true
                }
            }
        }
        return false
    }
}
The rest of the code should be pretty obvious, but just to make it complete, here is what I have for pausing:
isPaused = true
discont = true
And here is resume:
isPaused = false
If anyone could help me to understand how to align video and audio tracks during such live recording that would be great!
OK, it turns out there was no mistake in the code I provided. The issue I experienced was caused by video smoothing (stabilization), which was turned on :) I guess it needs extra frames to smooth the video, which is why the video output freezes at the end for a short period of time.
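For reference, a minimal sketch of turning video stabilization off on the capture connection. This is an assumption about which "smoothing" setting was involved, and videoOutput is a placeholder for your AVCaptureVideoDataOutput:
import AVFoundation
func disableStabilization(on videoOutput: AVCaptureVideoDataOutput) {
    // `videoOutput` is assumed to already be added to the AVCaptureSession.
    if let connection = videoOutput.connection(with: .video),
       connection.isVideoStabilizationSupported {
        // Ask the pipeline not to buffer extra frames for smoothing.
        connection.preferredVideoStabilizationMode = .off
    }
}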

Allowing background audio with Swift not working

I want to allow background audio while the app is not in focus. I currently have this code, which should allow that:
do {
    try AKSettings.setSession(category: .playback, with: .mixWithOthers)
} catch {
    print("error")
}
AKSettings.playbackWhileMuted = true
I also have the 'Audio, AirPlay and Picture in Picture' background mode enabled in the Capabilities settings. However, when I press the home button on my device the audio doesn't keep playing. What am I doing wrong? I am using AudioKit to produce the sounds, if that matters.
I am using a singleton to house all of the AudioKit components which I named AudioPlayer.swift. Here is what I have in my AudioPlayer.swift singleton file:
class AudioPlayer: NSObject {
    var currentFrequency = String()
    var soundIsPlaying = false
    var leftOscillator = AKOscillator()
    var rightOscillator = AKOscillator()
    var rain = try! AKAudioFile()
    var rainPlayer: AKAudioPlayer!
    var envelope = AKAmplitudeEnvelope()

    override init() {
        super.init()
        do {
            try AKSettings.setSession(category: .playback, with: .mixWithOthers)
        } catch {
            print("error")
        }
        AKSettings.playbackWhileMuted = true
        AudioKit.output = envelope
        AudioKit.start()
    }

    func setupFrequency(left: AKOscillator, right: AKOscillator, frequency: String) {
        currentFrequency = frequency
        leftOscillator = left
        rightOscillator = right
        let leftPanner = AKPanner(leftOscillator)
        leftPanner.pan = -1
        let rightPanner = AKPanner(rightOscillator)
        rightPanner.pan = 1
        // Set up rain and rainPlayer
        do {
            rain = try AKAudioFile(readFileName: "rain.wav")
            rainPlayer = try AKAudioPlayer(file: rain, looping: true, deferBuffering: false, completionHandler: nil)
        } catch { print(error) }
        let mixer = AKMixer(leftPanner, rightPanner, rainPlayer)
        // Put mixer in sound envelope
        envelope = AKAmplitudeEnvelope(mixer)
        envelope.attackDuration = 2.0
        envelope.decayDuration = 0
        envelope.sustainLevel = 1
        envelope.releaseDuration = 0.2
        // Start AudioKit stuff
        AudioKit.output = envelope
        AudioKit.start()
        leftOscillator.start()
        rightOscillator.start()
        rainPlayer.start()
        envelope.start()
        soundIsPlaying = true
    }
}
And here is an example of one of my sound effect view controllers, which reference the AudioKit singleton to send it a certain frequency (I have about a dozen of these view controllers, each with its own frequency settings):
class CalmView: UIViewController {
    let leftOscillator = AKOscillator()
    let rightOscillator = AKOscillator()

    override func viewDidLoad() {
        super.viewDidLoad()
        leftOscillator.amplitude = 0.3
        leftOscillator.frequency = 220
        rightOscillator.amplitude = 0.3
        rightOscillator.frequency = 230
    }

    @IBAction func playSound(_ sender: Any) {
        if shared.soundIsPlaying == false {
            AudioKit.stop()
            shared.setupFrequency(left: leftOscillator, right: rightOscillator, frequency: "Calm")
        } else if shared.soundIsPlaying == true && shared.currentFrequency != "Calm" {
            AudioKit.stop()
            shared.leftOscillator.stop()
            shared.rightOscillator.stop()
            shared.rainPlayer.stop()
            shared.envelope.stop()
            shared.setupFrequency(left: leftOscillator, right: rightOscillator, frequency: "Calm")
        } else {
            shared.soundIsPlaying = false
            shared.envelope.stop()
        }
    }
}
I instantiated the AudioPlayer singleton in my ViewController.swift file.
It depends on when you are doing your configuration in relation to when AudioKit is started. If you're using AudioKit you should be using its AKSettings to manage your session category, meaning not only the playback category but also mixWithOthers. By default it does this:
/// Set the audio session type
@objc open static func setSession(category: SessionCategory,
                                  with options: AVAudioSessionCategoryOptions = [.mixWithOthers]) throws {
So you'd do something like this in your ViewController:
do {
    if #available(iOS 10.0, *) {
        try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker, .allowBluetooth, .allowBluetoothA2DP])
    } else {
        // Fallback on earlier versions
    }
} catch {
    print("Errored setting category.")
}
So I think it's a matter of getting that straight. It might also help to have Inter-App Audio set up. If you still have trouble and can provide more information I can help more, but this is as good an answer as I can muster based on the info you've given so far.
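If you want to double-check the session outside of AudioKit, here is a minimal sketch using plain AVFoundation (Swift 4.2+ naming assumed, and it still requires the 'Audio, AirPlay and Picture in Picture' background mode to be enabled); AKSettings.setSession is essentially a wrapper around the same calls:
import AVFoundation
func configureBackgroundPlayback() {
    let session = AVAudioSession.sharedInstance()
    do {
        // .playback keeps audio running when the app moves to the background,
        // .mixWithOthers lets other apps' audio continue alongside it.
        try session.setCategory(.playback, mode: .default, options: [.mixWithOthers])
        try session.setActive(true)
    } catch {
        print("Audio session setup failed: \(error)")
    }
}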

AVPlayerItem.loadedTimeRanges notifications aren't working with custom AVAssetResourceLoaderDelegate

I use an AVAssetResourceLoaderDelegate for video caching, but the video only plays after it has been fully downloaded.
I add a KVO observer to the loadedTimeRanges property:
[self.player.currentItem addObserver:self
                          forKeyPath:@"loadedTimeRanges"
                             options:NSKeyValueObservingOptionNew
                             context:nil];
Check ranges and start playing:
- (void)checkRanges {
    if (self.player.currentItem.asset) {
        CMTimeRange loadedTimeRange =
            [[self.player.currentItem.loadedTimeRanges firstObject] CMTimeRangeValue];
        Float64 videoDuration = CMTimeGetSeconds(self.player.currentItem.asset.duration);
        Float64 buffered = CMTimeGetSeconds(loadedTimeRange.duration);
        Float64 percent = buffered / videoDuration;
        if (percent > 0.2) {
            [self.player play];
        }
    }
}
But the notifications aren't working as expected; for some reason I only get a notification when the video is almost completely downloaded.
The downloading is implemented with an AVAssetResourceLoaderDelegate, which should allow the cache to be used while the video plays.
The download is initiated in the AVAssetResourceLoaderDelegate method:
func resourceLoader(_ resourceLoader: AVAssetResourceLoader,
                    shouldWaitForLoadingOfRequestedResource loadingRequest: AVAssetResourceLoadingRequest) -> Bool {
    if let resourceURL = loadingRequest.request.url, resourceURL.isVCSVideoScheme(),
       let originalURL = resourceURL.vcsRemoteVideoURL() {
        let assetLoader: VideoAssetLoader
        if let loader = assetLoaderForRequest(loadingRequest) {
            assetLoader = loader
        } else {
            assetLoader = VideoAssetLoader(url: originalURL)
            assetLoader.delegate = self
            assetLoaders[keyForAssetLoaderWithURL(resourceURL)] = assetLoader
        }
        assetLoader.addRequest(loadingRequest)
        return true
    }
    return false
}
VideoAssetLoader performs a HEAD request to get the file information and then fills in the AVAssetResourceLoadingRequest's content information:
private func fillInContentInformation(_ contentInformationRequest: AVAssetResourceLoadingContentInformationRequest?) {
    guard let contentInformationRequest = contentInformationRequest,
          let contentInformation = self.contentInformation else {
        return
    }
    contentInformationRequest.isByteRangeAccessSupported = contentInformation.byteRangeAccessSupported
    contentInformationRequest.contentType = contentInformation.contentType
    contentInformationRequest.contentLength = Int64(contentInformation.contentLength)
}
VideoAssetLoader then downloads the requested range of bytes and responds to the AVAssetResourceLoadingDataRequest:
private func processPendingRequests() {
    for request in pendingRequests {
        fillInContentInformation(request.contentInformationRequest)
        let didRespondCompletely = respondWithDataForRequest(request.dataRequest)
        if didRespondCompletely {
            request.finishLoading()
        }
    }
    pendingRequests = pendingRequests.filter({ !$0.isFinished })
}

private func respondWithDataForRequest(_ dataRequest: AVAssetResourceLoadingDataRequest?) -> Bool {
    guard let dataRequest = dataRequest, let downloadTask = videoDownloadTask else {
        return false
    }
    var startOffset: UInt64 = UInt64(dataRequest.requestedOffset)
    if dataRequest.currentOffset != 0 {
        startOffset = UInt64(dataRequest.currentOffset)
    }
    let numberOfBytesToRespondWith = UInt64(dataRequest.requestedLength)
    var didRespondFully = false
    if let data = downloadTask.readCachedDataWithOffset(startOffset, lenght: numberOfBytesToRespondWith) {
        dataRequest.respond(with: data)
        didRespondFully = data.count >= dataRequest.requestedLength
    }
    return didRespondFully
}
Unfortunately, the video does not play until it has been completely downloaded (i.e. until AVAssetResourceLoadingRequest.finishLoading() is called), and I am not getting the loadedTimeRanges notifications either.
Does anyone have experience with this who can point me toward an area where I may be doing something wrong?
Thanks!

How to loop AVPlayer from 4 seconds to 8 seconds in Swift 3?

I have an AVPlayer in Swift 3 that plays video. The problem is that I want to loop from second A to second B (for example from 4 to 8). Here is my code for the loop, but it doesn't work:
NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime, object: self.Player.currentItem, queue: nil, using: { (_) in
    DispatchQueue.main.async {
        self.Player.seek(to: kCMTimeZero)
        self.Player.play()
        DispatchQueue.main.asyncAfter(deadline: DispatchTime.now() + 4.0) {
            // check if player is still playing
            if self.Player.rate != 0 {
                print("OK")
                print("Player reached 4.0 seconds")
                let timeScale = self.Player.currentItem?.asset.duration.timescale
                // let seconds = kCMTimeZero
                let time = CMTimeMakeWithSeconds(8.0, timeScale!)
                self.Player.seek(to: time, toleranceBefore: kCMTimeZero, toleranceAfter: kCMTimeZero)
                self.Player.play()
            }
        }
    }
})
The problem is that this loop doesn't work: because it is driven by AVPlayerItemDidPlayToEndTime, the print("OK") isn't reached until the player has finished the whole movie.
There are a few options:
If you want gapless playback, you can start off by using:
Pre iOS 10: https://developer.apple.com/library/content/samplecode/avloopplayer/Introduction/Intro.html
iOS 10+: https://developer.apple.com/documentation/avfoundation/avplayerlooper
The pre-iOS 10 "solution" from Apple does work, and it is the only way I have gotten gapless looping, since I target iOS 9.
If you are using that solution, you also need to either feed it an AVPlayerItem of the right length or extend the solution to cut the asset up as you send it to the player.
For that, you can do something like the changes I made to Apple's code below (sorry if it's a bit sparse; I'm just trying to show the main changes). Basically I pass in the track and the chunk of time to use, then insert that into an AVMutableCompositionTrack (I got rid of all the stuff for video; you will want to keep that in):
class myClass: someClass {
    var loopPlayer: QueuePlayerLooper!
    var avAssetLength: Int64!
    var avAssetTimescale: CMTimeScale!
    var avAssetTimeRange: CMTimeRange!
    let composition = AVMutableComposition()
    var playerItem: AVPlayerItem!
    var avAssetTrack: AVAssetTrack!
    var compAudioTrack: AVMutableCompositionTrack!
    var uurl: URL!
    var avAsset: AVURLAsset!

    func createCMTimeRange(start: TimeInterval, end: TimeInterval) -> CMTimeRange {
        avAssetTimescale = avAssetTrack.naturalTimeScale
        let a: CMTime = CMTime(seconds: start, preferredTimescale: avAssetTimescale)
        let b: CMTime = CMTime(seconds: end, preferredTimescale: avAssetTimescale)
        return CMTimeRange(start: a, end: b)
    }

    func startLoopingSection() {
        loopPlayer = QueuePlayerLooper(videoURL: uurl, loopCount: -1, timeRange: createCMTimeRange(start: a_playbackPosition, end: b_playbackPosition))
        loopPlayer.start() // pass the CALayer that should host the AVPlayerLayer if you use start(in:)
    }
}
//--==--==--==--==--==--==--==--==--
/*
Copyright (C) 2016 Apple Inc. All Rights Reserved.
See LICENSE.txt for this sample’s licensing information
Abstract:
An object that uses AVQueuePlayer to loop a video.
*/
// Marked changed code with ++
class QueuePlayerLooper: NSObject, Looper {
    // MARK: Types
    private struct ObserverContexts {
        static var playerStatus = 0
        static var playerStatusKey = "status"
        static var currentItem = 0
        static var currentItemKey = "currentItem"
        static var currentItemStatus = 0
        static var currentItemStatusKey = "currentItem.status"
        static var urlAssetDurationKey = "duration"
        static var urlAssetPlayableKey = "playable"
    }

    // MARK: Properties
    private var player: AVQueuePlayer?
    private var playerLayer: AVPlayerLayer?
    private var isObserving = false
    private var numberOfTimesPlayed = 0
    private let numberOfTimesToPlay: Int
    private let videoURL: URL
    ++let composition = AVMutableComposition()
    ++var currentTrack: AVAssetTrack!
    ++var assetTimeRange: CMTimeRange!

    // MARK: Looper
    required init(videoURL: URL, loopCount: Int, ++timeRange: CMTimeRange) {
        self.videoURL = videoURL
        self.numberOfTimesToPlay = loopCount
        ++self.assetTimeRange = timeRange
        super.init()
    }

    func start(in parentLayer: CALayer) {
        stop()
        player = AVQueuePlayer()
        playerLayer = AVPlayerLayer(player: player)
        guard let playerLayer = playerLayer else { fatalError("Error creating player layer") }
        playerLayer.frame = parentLayer.bounds
        parentLayer.addSublayer(playerLayer)

        let videoAsset = AVURLAsset(url: videoURL)
        // ++ Build a composition containing only the requested time range of the asset.
        ++let compositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
        ++currentTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo).first
        ++try! compositionTrack.insertTimeRange(assetTimeRange, of: currentTrack, at: CMTimeMake(0, 1))

        videoAsset.loadValuesAsynchronously(forKeys: [ObserverContexts.urlAssetDurationKey, ObserverContexts.urlAssetPlayableKey]) {
            /*
             The asset invokes its completion handler on an arbitrary queue
             when loading is complete. Because we want to access our AVQueuePlayer
             in our ensuing set-up, we must dispatch our handler to the main
             queue.
             */
            DispatchQueue.main.async(execute: {
                var durationError: NSError?
                let durationStatus = videoAsset.statusOfValue(forKey: ObserverContexts.urlAssetDurationKey, error: &durationError)
                guard durationStatus == .loaded else { fatalError("Failed to load duration property with error: \(durationError)") }

                var playableError: NSError?
                let playableStatus = videoAsset.statusOfValue(forKey: ObserverContexts.urlAssetPlayableKey, error: &playableError)
                guard playableStatus == .loaded else { fatalError("Failed to read playable duration property with error: \(playableError)") }

                guard videoAsset.isPlayable else {
                    print("Can't loop since asset is not playable")
                    return
                }
                guard CMTimeCompare(videoAsset.duration, CMTime(value: 1, timescale: 100)) >= 0 else {
                    print("Can't loop since asset duration too short. Duration is \(CMTimeGetSeconds(videoAsset.duration)) seconds")
                    return
                }

                /*
                 Based on the duration of the asset, we decide the number of player
                 items to add to demonstrate gapless playback of the same asset.
                 */
                let numberOfPlayerItems = (Int)(1.0 / CMTimeGetSeconds(videoAsset.duration)) + 2
                for _ in 1...numberOfPlayerItems {
                    let loopItem = AVPlayerItem(asset: ++self.composition)
                    self.player?.insert(loopItem, after: nil)
                }

                self.startObserving()
                self.numberOfTimesPlayed = 0
                self.player?.play()
            })
        }
    }
}
You can also add a periodic time observer to monitor the current time:
let timeObserverToken = player.addPeriodicTimeObserver(forInterval: someInterval, queue: DispatchQueue.main) { [unowned self] time in
    let seconds = CMTimeGetSeconds(time)
    if seconds >= 8.0 {
        // jump back to 4 seconds
        // do stuff
    }
}
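A simpler variant of the same idea, sketched below, uses a boundary time observer so you don't have to poll. The 4 s and 8 s values match the question; the class and function names are placeholders:
import AVFoundation
final class SectionLooper {
    private var boundaryToken: Any?

    func installLoop(on player: AVPlayer) {
        let start = CMTime(seconds: 4.0, preferredTimescale: 600)
        let end = CMTime(seconds: 8.0, preferredTimescale: 600)
        // Keep a strong reference to the token, otherwise the observer is removed.
        boundaryToken = player.addBoundaryTimeObserver(forTimes: [NSValue(time: end)], queue: .main) { [weak player] in
            // Fires each time playback crosses the 8-second mark; jump back to 4 s.
            player?.seek(to: start, toleranceBefore: kCMTimeZero, toleranceAfter: kCMTimeZero)
        }
        player.seek(to: start)
        player.play()
    }
}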

Get System Volume iOS

My case is simple:
I need to play a warning signal and want to make sure the user will hear it, so I want to check the system volume.
How can I find out what the current system volume is?
Update for Swift
let vol = AVAudioSession.sharedInstance().outputVolume
The audio session can provide output volume (iOS >= 6.0).
float vol = [[AVAudioSession sharedInstance] outputVolume];
NSLog(#"output volume: %1.2f dB", 20.f*log10f(vol+FLT_MIN));
Swift 3.1
let audioSession = AVAudioSession.sharedInstance()
var volume: Float?
do {
    try audioSession.setActive(true)
    volume = audioSession.outputVolume
} catch {
    print("Error Setting Up Audio Session")
}
audioSession.setActive(true) - important
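If you also need to react when the user changes the volume, outputVolume is key-value observable while the session is active. A minimal sketch using Swift 4+ block-based KVO (the class name is a placeholder):
import AVFoundation
final class VolumeWatcher {
    private let session = AVAudioSession.sharedInstance()
    private var observation: NSKeyValueObservation?

    func start() throws {
        try session.setActive(true)
        // outputVolume is documented as KVO-observable on an active session.
        observation = session.observe(\.outputVolume, options: [.initial, .new]) { _, change in
            if let volume = change.newValue {
                print("System output volume: \(volume)")
            }
        }
    }
}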
Try this:
MPMusicPlayerController *iPod = [MPMusicPlayerController iPodMusicPlayer];
float volumeLevel = iPod.volume;
You need to import the MediaPlayer framework.
This works fine:
Float32 volume;
UInt32 dataSize = sizeof(Float32);
AudioSessionGetProperty(
    kAudioSessionProperty_CurrentHardwareOutputVolume,
    &dataSize,
    &volume
);
For Swift 2:
let volume = AVAudioSession.sharedInstance().outputVolume
print("Output volume: \(volume)")
You can use the system's default volume view and add it wherever you need it. In my case I required it in my own music player. It's easy and hassle free: just add the view, and everything is done. This is explained in Apple's MPVolumeView Class Reference.
mpVolumeViewParentView.backgroundColor = [UIColor clearColor];
MPVolumeView *myVolumeView =
[[MPVolumeView alloc] initWithFrame: mpVolumeViewParentView.bounds];
[mpVolumeViewParentView addSubview: myVolumeView];
[myVolumeView release];
I have prepared a class with static methods to deal with the volume of iOS devices. Let me share it with you :)
import AVFoundation

class HeadPhoneDetectHelper {
    class func isHeadPhoneConnected() -> Bool {
        do {
            let audioSession = AVAudioSession.sharedInstance()
            try audioSession.setActive(true)
            let currentRoute = audioSession.currentRoute
            let headPhonePortDescriptionArray = currentRoute.outputs.filter { $0.portType == AVAudioSessionPortHeadphones }
            let isHeadPhoneConnected = headPhonePortDescriptionArray.count != 0
            return isHeadPhoneConnected
        } catch {
            print("Error while checking head phone connection : \(error)")
        }
        return false
    }

    class func isVolumeLevelAppropriate() -> Bool {
        let minimumVolumeLevelToAccept = 100
        let currentVolumeLevel = HeadPhoneDetectHelper.getVolumeLevelAsPercentage()
        let isVolumeLevelAppropriate = currentVolumeLevel >= minimumVolumeLevelToAccept
        return isVolumeLevelAppropriate
    }

    class func getVolumeLevelAsPercentage() -> Int {
        do {
            let audioSession = AVAudioSession.sharedInstance()
            try audioSession.setActive(true)
            let audioVolume = audioSession.outputVolume
            let audioVolumePercentage = audioVolume * 100
            return Int(audioVolumePercentage)
        } catch {
            print("Error while getting volume level \(error)")
        }
        return 0
    }
}
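A usage sketch, for example before playing the warning signal (the 100% threshold comes from minimumVolumeLevelToAccept above and can be adjusted):
if HeadPhoneDetectHelper.isHeadPhoneConnected() {
    print("Headphones are plugged in")
}
if !HeadPhoneDetectHelper.isVolumeLevelAppropriate() {
    print("Volume too low, currently \(HeadPhoneDetectHelper.getVolumeLevelAsPercentage())%")
}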
Swift 2.2, make sure to import MediaPlayer
private func setupVolumeListener() {
    let frameView: CGRect = CGRectMake(0, 0, 0, 0)
    let volumeView = MPVolumeView(frame: frameView)
    //self.window?.addSubview(volumeView) //use in app delegate
    self.view.addSubview(volumeView) //use in a view controller
    NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(volumeChanged(_:)), name: "AVSystemController_SystemVolumeDidChangeNotification", object: nil)
}//eom

func volumeChanged(notification: NSNotification) {
    let volume = notification.userInfo!["AVSystemController_AudioVolumeNotificationParameter"]
    let category = notification.userInfo!["AVSystemController_AudioCategoryNotificationParameter"]
    let reason = notification.userInfo!["AVSystemController_AudioVolumeChangeReasonNotificationParameter"]
    print("volume: \(volume!)")
    print("category: \(category!)")
    print("reason: \(reason!)")
    print("\n")
}//eom
