Since AudioSessionInitialize and AudioSessionGetProperty are deprecated, I am getting the wrong return values:
CFStringRef state = nil;
UInt32 propertySize = sizeof(CFStringRef);
AudioSessionInitialize(NULL, NULL, NULL, NULL);
OSStatus status = AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state);
[[AVAudioSession sharedInstance] setActive:YES error:nil];
if (status == kAudioSessionNoError) {
return CFStringGetLength(state) == 0; // YES = silent
}
return NO;
From this code (I found it here), I get the same incorrect result no matter what state the device is actually in. How can I detect whether silent mode is currently ON on the device?
The API is no longer available, but the workaround is simple:
Play a short audio clip and measure how long it takes to finish playing.
If it finishes in less time than the actual length of the audio, then the device is muted.
Hoishing posted a helper class, MuteChecker, on his blog. Use it as follows:
self.muteChecker = [[MuteChecker alloc] initWithCompletionBlk:^(NSTimeInterval lapse, BOOL muted) {
NSLog(@"muted: %d", muted);
}];
[self.muteChecker check];
This is the complete code for the class; you can simply copy and paste it into your project:
MuteChecker.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
typedef void (^MuteCheckCompletionHandler)(NSTimeInterval lapse, BOOL muted);
// this class must be used with a MuteChecker.caf (a 0.2 sec silent sound) in the bundle
@interface MuteChecker : NSObject
-(instancetype)initWithCompletionBlk:(MuteCheckCompletionHandler)completionBlk;
-(void)check;
@end
MuteChecker.m
#import "MuteChecker.h"
void MuteCheckCompletionProc(SystemSoundID ssID, void* clientData);
@interface MuteChecker ()
@property (nonatomic, assign) SystemSoundID soundId;
@property (strong) MuteCheckCompletionHandler completionBlk;
@property (nonatomic, strong) NSDate *startTime;
-(void)completed;
@end
void MuteCheckCompletionProc(SystemSoundID ssID, void* clientData){
MuteChecker *obj = (__bridge MuteChecker *)clientData;
[obj completed];
}
@implementation MuteChecker
-(void)playMuteSound
{
self.startTime = [NSDate date];
AudioServicesPlaySystemSound(self.soundId);
}
-(void)completed
{
NSDate *now = [NSDate date];
NSTimeInterval t = [now timeIntervalSinceDate:self.startTime];
BOOL muted = (t > 0.1)? NO : YES;
self.completionBlk(t, muted);
}
-(void)check {
if (self.startTime == nil) {
[self playMuteSound];
} else {
NSDate *now = [NSDate date];
NSTimeInterval lastCheck = [now timeIntervalSinceDate:self.startTime];
if (lastCheck > 1) { // prevent a checking interval shorter than the sound length
[self playMuteSound];
}
}
}
- (instancetype)initWithCompletionBlk:(MuteCheckCompletionHandler)completionBlk
{
self = [self init];
if (self) {
NSURL* url = [[NSBundle mainBundle] URLForResource:@"MuteChecker" withExtension:@"caf"];
if (AudioServicesCreateSystemSoundID((__bridge CFURLRef)url, &_soundId) == kAudioServicesNoError){
AudioServicesAddSystemSoundCompletion(self.soundId, CFRunLoopGetMain(), kCFRunLoopDefaultMode, MuteCheckCompletionProc,(__bridge void *)(self));
UInt32 yes = 1;
AudioServicesSetProperty(kAudioServicesPropertyIsUISound, sizeof(_soundId),&_soundId,sizeof(yes), &yes);
self.completionBlk = completionBlk;
} else {
NSLog(@"error setting up Sound ID");
}
}
return self;
}
- (void)dealloc
{
if (self.soundId != -1){
AudioServicesRemoveSystemSoundCompletion(self.soundId);
AudioServicesDisposeSystemSoundID(self.soundId);
}
}
@end
Important note: you will also have to provide a short audio file, MuteChecker.caf, for the code to work. You could download one from his blog directly or generate one yourself.
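If you prefer to generate the file yourself, below is a minimal Swift sketch (assuming the 0.2-second length mentioned above; the function name is my own) that writes a short silent CAF with AVFoundation:
import AVFoundation

// Writes roughly 0.2 s of silence to a .caf file that MuteChecker can play.
func writeSilentCAF(to url: URL) throws {
    let format = AVAudioFormat(standardFormatWithSampleRate: 44100, channels: 1)!
    let file = try AVAudioFile(forWriting: url, settings: format.settings)
    let frameCount = AVAudioFrameCount(44100 * 0.2)
    let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCount)!
    buffer.frameLength = frameCount
    // Explicitly zero the samples so the buffer is guaranteed to be silent.
    if let channels = buffer.floatChannelData {
        for channel in 0..<Int(format.channelCount) {
            channels[channel].update(repeating: 0, count: Int(frameCount))
        }
    }
    try file.write(from: buffer)
}
Run it once (for example from a small test target) against a URL ending in "MuteChecker.caf", then add the generated file to your app bundle.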
Related
I am trying to implement a simple SuperpoweredAdvancedAudioPlayer in swift. I successfully modified the SuperpoweredCrossExample project so that playerA plays the song on starting the application.
ViewController.mm now looks like this:
#import "ViewController.h"
#import "SuperpoweredAdvancedAudioPlayer.h"
#import "SuperpoweredFilter.h"
#import "SuperpoweredRoll.h"
#import "SuperpoweredFlanger.h"
#import "SuperpoweredIOSAudioIO.h"
#import "SuperpoweredSimple.h"
#import <stdlib.h>
#define HEADROOM_DECIBEL 3.0f
static const float headroom = powf(10.0f, -HEADROOM_DECIBEL * 0.025);
/*
This is a .mm file, meaning it's Objective-C++.
You can perfectly mix it with Objective-C or Swift, as long as you keep the member variables and C++ related includes here.
Yes, the header file (.h) isn't the only place for member variables.
*/
@implementation ViewController {
SuperpoweredAdvancedAudioPlayer *playerA;
SuperpoweredIOSAudioIO *output;
float *stereoBuffer, volA;
unsigned int lastSamplerate;
}
void playerEventCallbackA(void *clientData, SuperpoweredAdvancedAudioPlayerEvent event, void *value) {
if (event == SuperpoweredAdvancedAudioPlayerEvent_LoadSuccess) {
ViewController *self = (__bridge ViewController *)clientData;
self->playerA->setBpm(126.0f);
self->playerA->setFirstBeatMs(353);
self->playerA->setPosition(self->playerA->firstBeatMs, false, false);
};
}
// This is where the Superpowered magic happens.
static bool audioProcessing(void *clientdata, float **buffers, unsigned int inputChannels, unsigned int outputChannels, unsigned int numberOfSamples, unsigned int samplerate, uint64_t hostTime) {
__unsafe_unretained ViewController *self = (__bridge ViewController *)clientdata;
if (samplerate != self->lastSamplerate) { // Has samplerate changed?
self->lastSamplerate = samplerate;
self->playerA->setSamplerate(samplerate);
};
bool silence = !self->playerA->process(self->stereoBuffer, false, numberOfSamples, self->volA);
if (!silence) SuperpoweredDeInterleave(self->stereoBuffer, buffers[0], buffers[1], numberOfSamples); // The stereoBuffer is ready now, let's put the finished audio into the requested buffers.
return !silence;
}
- (void)viewDidLoad {
[super viewDidLoad];
[self f];
}
- (void) f {
volA = 1.0f * headroom;
if (posix_memalign((void **)&stereoBuffer, 16, 4096 + 128) != 0) abort(); // Allocating memory, aligned to 16.
playerA = new SuperpoweredAdvancedAudioPlayer((__bridge void *)self, playerEventCallbackA, 44100, 0);
playerA->open([[[NSBundle mainBundle] pathForResource:@"lycka" ofType:@"mp3"] fileSystemRepresentation]);
output = [[SuperpoweredIOSAudioIO alloc] initWithDelegate:(id<SuperpoweredIOSAudioIODelegate>)self preferredBufferSize:12 preferredMinimumSamplerate:44100 audioSessionCategory:AVAudioSessionCategoryPlayback channels:2 audioProcessingCallback:audioProcessing clientdata:(__bridge void *)self];
[output start];
playerA->play(false);
}
- (void)dealloc {
delete playerA;
free(stereoBuffer);
#if !__has_feature(objc_arc)
[output release];
[super dealloc];
#endif
}
- (void)interruptionStarted {}
- (void)recordPermissionRefused {}
- (void)mapChannels:(multiOutputChannelMap *)outputMap inputMap:(multiInputChannelMap *)inputMap externalAudioDeviceName:(NSString *)externalAudioDeviceName outputsAndInputs:(NSString *)outputsAndInputs {}
- (void)interruptionEnded { // If a player plays Apple Lossless audio files, then we need this. Otherwise unnecessary.
playerA->onMediaserverInterrupt();
}
@end
I am trying to use the same code in Swift, following the same method used in the SuperpoweredFrequencies project to import C++ files into Swift.
Superpowered.h:
#import <UIKit/UIKit.h>
@interface Superpowered : NSObject
-(void) f;
@end
Superpowered.mm:
#import "Superpowered.h"
#import "Superpowered/Headers/SuperpoweredAdvancedAudioPlayer.h"
#import "Superpowered/Headers/SuperpoweredFilter.h"
#import "Superpowered/Headers/SuperpoweredRoll.h"
#import "Superpowered/Headers/SuperpoweredFlanger.h"
#import "Superpowered/SuperpoweredIOSAudioIO.h"
#import "Superpowered/Headers/SuperpoweredSimple.h"
#import <stdlib.h>
#define HEADROOM_DECIBEL 3.0f
static const float headroom = powf(10.0f, -HEADROOM_DECIBEL * 0.025);
/*
This is a .mm file, meaning it's Objective-C++.
You can perfectly mix it with Objective-C or Swift, as long as you keep the member variables and C++ related includes here.
Yes, the header file (.h) isn't the only place for member variables.
*/
@implementation Superpowered {
SuperpoweredAdvancedAudioPlayer *playerA;
SuperpoweredIOSAudioIO *output;
float *stereoBuffer, volA;
unsigned int lastSamplerate;
}
void playerEventCallbackA(void *clientData, SuperpoweredAdvancedAudioPlayerEvent event, void *value) {
if (event == SuperpoweredAdvancedAudioPlayerEvent_LoadSuccess) {
Superpowered *self = (__bridge Superpowered *)clientData;
self->playerA->setBpm(126.0f);
self->playerA->setFirstBeatMs(353);
self->playerA->setPosition(self->playerA->firstBeatMs, false, false);
};
}
// This is where the Superpowered magic happens.
static bool audioProcessing(void *clientdata, float **buffers, unsigned int inputChannels, unsigned int outputChannels, unsigned int numberOfSamples, unsigned int samplerate, uint64_t hostTime) {
__unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
if (samplerate != self->lastSamplerate) { // Has samplerate changed?
self->lastSamplerate = samplerate;
self->playerA->setSamplerate(samplerate);
};
bool silence = !self->playerA->process(self->stereoBuffer, false, numberOfSamples, self->volA);
if (!silence) SuperpoweredDeInterleave(self->stereoBuffer, buffers[0], buffers[1], numberOfSamples); // The stereoBuffer is ready now, let's put the finished audio into the requested buffers.
return !silence;
}
- (void)f {
volA = 1.0f * headroom;
if (posix_memalign((void **)&stereoBuffer, 16, 4096 + 128) != 0) abort(); // Allocating memory, aligned to 16.
playerA = new SuperpoweredAdvancedAudioPlayer((__bridge void *)self, playerEventCallbackA, 44100, 0);
playerA->open([[[NSBundle mainBundle] pathForResource:@"lycka" ofType:@"mp3"] fileSystemRepresentation]);
output = [[SuperpoweredIOSAudioIO alloc] initWithDelegate:(id<SuperpoweredIOSAudioIODelegate>)self preferredBufferSize:12 preferredMinimumSamplerate:44100 audioSessionCategory:AVAudioSessionCategoryPlayback channels:2 audioProcessingCallback:audioProcessing clientdata:(__bridge void *)self];
[output start];
playerA->play(false);
}
- (void)dealloc {
delete playerA;
free(stereoBuffer);
#if !__has_feature(objc_arc)
[output release];
[super dealloc];
#endif
}
- (void)interruptionStarted {}
- (void)recordPermissionRefused {}
- (void)mapChannels:(multiOutputChannelMap *)outputMap inputMap:(multiInputChannelMap *)inputMap externalAudioDeviceName:(NSString *)externalAudioDeviceName outputsAndInputs:(NSString *)outputsAndInputs {}
- (void)interruptionEnded { // If a player plays Apple Lossless audio files, then we need this. Otherwise unnecessary.
playerA->onMediaserverInterrupt();
}
@end
Project-Bridging-Header.h:
#import "Superpowered.h"
Controller.swift:
override func viewDidLoad() {
super.viewDidLoad()
let s = Superpowered();
s.f();
}
When running the app it crashes and gives the following error:
let s = Superpowered() should be declared outside viewDidLoad(); the local instance is deallocated as soon as the method returns. Declaring it as an instance variable solved the problem.
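For illustration, a minimal sketch of that fix (the property name is my own):
import UIKit

class Controller: UIViewController {
    // Keep a strong reference so the wrapper is not deallocated
    // as soon as viewDidLoad() returns.
    private let superpowered = Superpowered()

    override func viewDidLoad() {
        super.viewDidLoad()
        superpowered.f()
    }
}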
WebRTC video uses the front camera by default, which works fine. However, I need to switch it to the back camera, and I have not been able to find any code to do that.
Which part do I need to edit?
Is it the localView, the localVideoTrack, or the capturer?
Swift 3.0
A peer connection can have only one RTCVideoTrack for sending a video stream.
To switch between the front and back camera, you must first remove the current video track from the peer connection.
Then create a new RTCVideoTrack for the camera you need, and set it on the peer connection.
I used these methods.
func swapCameraToFront() {
    let localStream: RTCMediaStream? = peerConnection?.localStreams.first as? RTCMediaStream
    if let videoTrack = localStream?.videoTracks.first as? RTCVideoTrack {
        localStream?.removeVideoTrack(videoTrack)
    }
    let localVideoTrack: RTCVideoTrack? = createLocalVideoTrack()
    if let localVideoTrack = localVideoTrack {
        localStream?.addVideoTrack(localVideoTrack)
        delegate?.appClient(self, didReceiveLocalVideoTrack: localVideoTrack)
    }
    if let localStream = localStream {
        peerConnection?.remove(localStream)
        peerConnection?.add(localStream)
    }
}
func swapCameraToBack() {
    let localStream: RTCMediaStream? = peerConnection?.localStreams.first as? RTCMediaStream
    if let videoTrack = localStream?.videoTracks.first as? RTCVideoTrack {
        localStream?.removeVideoTrack(videoTrack)
    }
    let localVideoTrack: RTCVideoTrack? = createLocalVideoTrackBackCamera()
    if let localVideoTrack = localVideoTrack {
        localStream?.addVideoTrack(localVideoTrack)
        delegate?.appClient(self, didReceiveLocalVideoTrack: localVideoTrack)
    }
    if let localStream = localStream {
        peerConnection?.remove(localStream)
        peerConnection?.add(localStream)
    }
}
As of now I only have the answer in Objective-C, in regard to Ankit's comment below. I will convert it to Swift after some time.
You can check the code below:
- (RTCVideoTrack *)createLocalVideoTrack {
RTCVideoTrack *localVideoTrack = nil;
NSString *cameraID = nil;
for (AVCaptureDevice *captureDevice in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
if (captureDevice.position == AVCaptureDevicePositionFront) {
cameraID = [captureDevice localizedName]; break;
}
}
RTCVideoCapturer *capturer = [RTCVideoCapturer capturerWithDeviceName:cameraID];
RTCMediaConstraints *mediaConstraints = [self defaultMediaStreamConstraints];
RTCVideoSource *videoSource = [_factory videoSourceWithCapturer:capturer constraints:mediaConstraints];
localVideoTrack = [_factory videoTrackWithID:@"ARDAMSv0" source:videoSource];
return localVideoTrack;
}
- (RTCVideoTrack *)createLocalVideoTrackBackCamera {
RTCVideoTrack *localVideoTrack = nil;
NSString *cameraID = nil;
for (AVCaptureDevice *captureDevice in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
if (captureDevice.position == AVCaptureDevicePositionBack) {
cameraID = [captureDevice localizedName];
break;
}
}
RTCVideoCapturer *capturer = [RTCVideoCapturer capturerWithDeviceName:cameraID];
RTCMediaConstraints *mediaConstraints = [self defaultMediaStreamConstraints];
RTCVideoSource *videoSource = [_factory videoSourceWithCapturer:capturer constraints:mediaConstraints];
localVideoTrack = [_factory videoTrackWithID:@"ARDAMSv0" source:videoSource];
return localVideoTrack;
}
If you decide to use the official Google build, here is the explanation:
First, you must configure your camera before calling start; the best place to do that is in the ARDVideoCallViewDelegate method didCreateLocalCapturer.
- (void)startCapture:(void (^)(BOOL succeeded))completionHandler {
AVCaptureDevicePosition position = _usingFrontCamera ? AVCaptureDevicePositionFront : AVCaptureDevicePositionBack;
__weak AVCaptureDevice *device = [self findDeviceForPosition:position];
if ([device lockForConfiguration:nil]) {
if ([device isFocusPointOfInterestSupported]) {
[device setFocusModeLockedWithLensPosition:0.9 completionHandler: nil];
}
}
AVCaptureDeviceFormat *format = [self selectFormatForDevice:device];
if (format == nil) {
RTCLogError(@"No valid formats for device %@", device);
NSAssert(NO, @"");
return;
}
NSInteger fps = [self selectFpsForFormat:format];
[_capturer startCaptureWithDevice: device
format: format
fps:fps completionHandler:^(NSError * error) {
NSLog(@"%@", error);
if (error == nil) {
completionHandler(true);
}
}];
}
Don't forget that enabling the capture device is asynchronous; it's sometimes better to use the completion handler to be sure everything is done as expected.
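For example, a minimal Swift sketch of that pattern (the capturer, device, format, fps and completionHandler values are assumed to come from a setup like the one above):
// Treat capture as started only once the completion handler fires.
capturer.startCapture(with: device, format: format, fps: fps) { error in
    if let error = error {
        print("startCapture failed: \(error)")
    } else {
        // The capture session is fully configured at this point.
        completionHandler(true)
    }
}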
I am not sure which Chrome version you are using for WebRTC, but with v54 and above there is a BOOL property called useBackCamera in the RTCAVFoundationVideoSource class. You can use this property to switch between the front and back camera.
Swift 4.0 & 'GoogleWebRTC' : '1.1.20913'
The RTCAVFoundationVideoSource class has a property named useBackCamera that can be used to switch cameras.
@interface RTCAVFoundationVideoSource : RTCVideoSource
- (instancetype)init NS_UNAVAILABLE;
/**
* Calling this function will cause frames to be scaled down to the
* requested resolution. Also, frames will be cropped to match the
* requested aspect ratio, and frames will be dropped to match the
* requested fps. The requested aspect ratio is orientation agnostic and
* will be adjusted to maintain the input orientation, so it doesn't
* matter if e.g. 1280x720 or 720x1280 is requested.
*/
- (void)adaptOutputFormatToWidth:(int)width height:(int)height fps:(int)fps;
/** Returns whether rear-facing camera is available for use. */
@property(nonatomic, readonly) BOOL canUseBackCamera;
/** Switches the camera being used (either front or back). */
@property(nonatomic, assign) BOOL useBackCamera;
/** Returns the active capture session. */
@property(nonatomic, readonly) AVCaptureSession *captureSession;
Below is the implementation for switching camera.
var useBackCamera: Bool = false
func switchCamera() {
useBackCamera = !useBackCamera
self.switchCamera(useBackCamera: useBackCamera)
}
private func switchCamera(useBackCamera: Bool) -> Void {
let localStream = peerConnection?.localStreams.first
if let videoTrack = localStream?.videoTracks.first {
localStream?.removeVideoTrack(videoTrack)
}
let localVideoTrack = createLocalVideoTrack(useBackCamera: useBackCamera)
localStream?.addVideoTrack(localVideoTrack)
self.delegate?.webRTCClientDidAddLocal(videoTrack: localVideoTrack)
if let ls = localStream {
peerConnection?.remove(ls)
peerConnection?.add(ls)
}
}
func createLocalVideoTrack(useBackCamera: Bool) -> RTCVideoTrack {
let videoSource = self.factory.avFoundationVideoSource(with: self.constraints)
videoSource.useBackCamera = useBackCamera
let videoTrack = self.factory.videoTrack(with: videoSource, trackId: "video")
return videoTrack
}
In the current version of WebRTC, RTCAVFoundationVideoSource has been deprecated and replaced with a
generic RTCVideoSource combined with an RTCVideoCapturer implementation.
In order to switch the camera I'm doing this:
- (void)switchCameraToPosition:(AVCaptureDevicePosition)position completionHandler:(void (^)(void))completionHandler {
if (self.cameraPosition != position) {
RTCMediaStream *localStream = self.peerConnection.localStreams.firstObject;
[localStream removeVideoTrack:self.localVideoTrack];
//[self.peerConnection removeStream:localStream];
self.localVideoTrack = [self createVideoTrack];
[self startCaptureLocalVideoWithPosition:position completionHandler:^{
[localStream addVideoTrack:self.localVideoTrack];
//[self.peerConnection addStream:localStream];
if (completionHandler) {
completionHandler();
}
}];
self.cameraPosition = position;
}
}
Take a look at the commented lines: if you start adding/removing the stream from the peer connection, it will cause a delay in the video connection.
I'm using GoogleWebRTC-1.1.25102
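For reference, here is a minimal Swift sketch of the same capturer-based switch; the capturer property (an RTCCameraVideoCapturer created with the shared RTCVideoSource), the format choice and the fps selection are assumptions:
import AVFoundation
import WebRTC

func startCapture(position: AVCaptureDevice.Position) {
    // Pick the capture device for the requested camera position.
    guard let device = RTCCameraVideoCapturer.captureDevices()
            .first(where: { $0.position == position }),
          // Pick a supported format (a real app would match the desired resolution).
          let format = RTCCameraVideoCapturer.supportedFormats(for: device).last else { return }
    let fps = format.videoSupportedFrameRateRanges.map { Int($0.maxFrameRate) }.max() ?? 30
    capturer.startCapture(with: device, format: format, fps: fps)
}
To switch, you can call this again with the other position; the video track and stream stay untouched.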
I'm trying to measure the default vibration duration on iPhone, iOS 6:
@property (nonatomic, retain) NSDate *startDate;
- (void) runTest
{
AudioServicesAddSystemSoundCompletion(kSystemSoundID_Vibrate, NULL, NULL, playSoundFinished, self);
self.startDate = [NSDate date];
AudioServicesPlayAlertSound(kSystemSoundID_Vibrate);
}
void playSoundFinished(SystemSoundID soundID, void *data)
{
MyClassName *foo = (MyClassName*)data;
double vibrationDurationMs = [foo.startDate timeIntervalSinceNow] * -1000.0;
}
But it returns just 1-2 milliseconds, regardless of whether vibration is activated in system settings or not.
I heard the default duration should be 400 ms, followed by 100 ms of silence.
Is the above code correct?
I'm using KxMovie: https://github.com/kolyvan/kxmovie
It appears that to stop a stream and close the view controller, one should use [pause];
However, I'm trying to receive a stream from a version of gstreamer that has a memory leak if a stream isn't closed properly (it's just left hanging).
So, just [pause]ing isn't an option for me.
I'm trying to use [closeFile] in the KxMovie decoder:
-(void) closeFile
{
[self closeAudioStream];
[self closeVideoStream];
[self closeSubtitleStream];
_videoStreams = nil;
_audioStreams = nil;
_subtitleStreams = nil;
if (_formatCtx) {
_formatCtx->interrupt_callback.opaque = NULL;
_formatCtx->interrupt_callback.callback = NULL;
avformat_close_input(&_formatCtx);
_formatCtx = NULL;
}
}
However, I usually get an EXC_BAD_ACCESS from av_read_frame after [closeFile] issues avformat_close_input.
Can anyone give me some advice on how to cleanly shutdown an RTSP stream using ffmpeg?
Thanks!
I was also confused by this, and I do not quite understand your solution.
I fixed it as shown below; could you give some advice?
_dispatchQueue is the same queue that does the asyncDecodeFrames work.
- (void)unSetup {
_buffered = NO;
_interrupted = YES;
dispatch_async(_dispatchQueue, ^{
if (_decoder) {
[self pause];
[self freeBufferedFrames];
if (_moviePosition == 0 || _decoder.isEOF)
[gHistory removeObjectForKey:_decoder.path];
else if (!_decoder.isNetwork)
[gHistory setValue:[NSNumber numberWithFloat:_moviePosition]
forKey:_decoder.path];
[_decoder closeFile];
}
});
}
I needed to use the interrupt callbacks to interrupt av_read_frame:
_formatCtx->interrupt_callback.opaque
_formatCtx->interrupt_callback.callback
Wait for the callback to be called and return non-zero.
After the callback has returned an interrupt value, av_close_input can safely be called (after closing any codecs used).
The code snippets below are in Objective-C, and the implementation file (.m) is for the object that handles the RTSP stuff (RTSPProviderObject).
It was tested with Xcode Version 10.1 (10B61) and a manually built FFmpeg at the current version to date (4.2.1 / 15.10.2019).
Should you need the build script configuration and/or the library versions used, just ask.
I had the same issue as the OP but couldn't use his solution.
The full version of the interrupt callback I used was:
int interruptCallBack(void *ctx){
RTSPProviderObject *whyFFmpeg = (__bridge RTSPProviderObject*)ctx;
NSLog(@"What is this!");
if(whyFFmpeg.whatIsHappeningSTR) {
return 1;
} else {
return 0;
}
}
The return value 1 should have interrupted av_read_frame() and exited without a crash, as far as I currently understand it.
It still crashed. My solution was to let av_read_frame() finish reading, then terminate the session context, which will be freed, and not allow any more reading. This was easy since I hit this issue when I deallocated my RTSPProviderObject and no reading was being done.
The final usage was:
[self.rtspProvider cleanup];
self.rtspProvider = nil;
Below is the full code snippet:
#import "Don't forget the required ffmpeg headers or header file"
int interruptCallBack(void *ctx){
RTSPProviderObject *whyFFmpeg = (__bridge RTSPProviderObject*)ctx;
NSLog(@"What is this!");
if(whyFFmpeg.whatIsHappeningSTR) {
return 1;
} else {
return 0;
}
}
@interface RTSPProviderObject ()
@property (nonatomic, assign) AVFormatContext *sessionContext;
@property (nonatomic, assign) NSString *whatIsHappeningSTR;
@property (nonatomic, assign) AVDictionary *sessionOptions;
@property (nonatomic, assign) BOOL usesTcp;
@property (nonatomic, assign) BOOL isInputStreamOpen;
@property (nonatomic, strong) NSLock *audioPacketQueueLock;
@property (nonatomic, strong) NSLock *packetQueueLock;
@property (nonatomic, strong, readwrite) NSMutableArray *audioPacketQueue;
@property (nonatomic, assign) int selectedVideoStreamIndex;
@property (nonatomic, assign) int selectedAudioStreamIndex;
@end
@implementation RTSPProviderObject
- (id _Nullable)init
{
self = [super init];
if (!self)
{
return nil;
}
self.sessionContext = NULL;
self.sessionContext = avformat_alloc_context();
AVFormatContext *pFormatCtx = self.sessionContext;
if (!pFormatCtx)
{
// Error handling code...
}
// MUST be called before avformat_open_input().
av_dict_free(&_sessionOptions);
self.sessionOptions = 0;
if (self.usesTcp)
{
// "rtsp_transport" - Set RTSP transport protocols.
// Allowed are: udp_multicast, tcp, udp, http.
av_dict_set(&_sessionOptions, "rtsp_transport", "tcp", 0);
}
// Open an input stream and read the header with the demuxer options.
// rtspURL - connection url to your remote ip camera which supports RTSP 2.0.
if (avformat_open_input(&pFormatCtx, rtspURL.UTF8String, NULL, &_sessionOptions) != 0)
{
self.isInputStreamOpen = NO;
// Error handling code...
}
self.isInputStreamOpen = YES;
// user-supplied AVFormatContext pFormatCtx might have been modified.
self.sessionContext = pFormatCtx;
pFormatCtx->interrupt_callback.callback = interruptCallBack;
pFormatCtx->interrupt_callback.opaque = (__bridge void *)(self);
// ... Other needed but currently not relevant code for codec/stream and other setup.
return self;
}
- (BOOL)prepareNextFrame
{
NSLog(@"%s", __PRETTY_FUNCTION__);
int isVideoFrameAvailable = 0;
// The session context is needed to provide frame data. Frame data is provided for video and audio.
// av_read_frame reads from pFormatCtx.
AVFormatContext *pFormatCtx = self.sessionContext;
if (!pFormatCtx) { return NO; }
// Audio packet access is forbidden.
[self.packetQueueLock lock];
BOOL readResult = YES;
// Calling av_read_frame while it is reading causes a bad_exception.
// We read frames as long as the session context contains frames to be read and consumed (usually one).
while (!isVideoFrameAvailable && self.isInputStreamOpen && readResult) {
if (packet.buf == nil && self.whatIsHappeningSTR) {
[self.packetQueueLock unlock];
return NO;
}
NSLog(@"New frame will be read.");
if (self.shouldTerminateStreams) {
[self terminate];
[self.packetQueueLock unlock];
return NO;
}
readResult = av_read_frame(pFormatCtx, &packet) >=0;
// Video packet data decoding.
// We need to make sure that the frame video data which is consumed matches the user selected stream.
if (packet.stream_index == self.selectedVideoStreamIndex) {
// DEPRECATED:
// avcodec_decode_video2(self.videoCodecContext, self.rawFrameData, &isVideoFrameAvailable, &packet);
// Replaced by this new implementation. Read more: https://blogs.gentoo.org/lu_zero/2016/03/29/new-avcodec-api/
// *
// We need the video context to decode video data.
AVCodecContext *videoContext = self.videoCodecContext;
if (!videoContext && videoContext->codec_type == AVMEDIA_TYPE_VIDEO) { isVideoFrameAvailable = 1; }
int ret;
// Supply raw packet data as input to a decoder.
ret = avcodec_send_packet(videoContext, &packet);
if (ret < 0)
{
NSLog(@"codec: sending video packet failed");
[self.packetQueueLock unlock];
return NO;
}
// Return decoded output data from a decoder.
ret = avcodec_receive_frame(videoContext, self.rawFrameData);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
{
[self.packetQueueLock unlock];
return NO;
}
if (ret >= 0) { isVideoFrameAvailable = 1; }
// *
} else {
// av_frame_unref unreferences all the buffers referenced by self.rawFrameData and resets the frame fields.
// We must do this manually if we don't use the video frame or we will leak the frame data.
av_frame_unref(self.rawFrameData);
isVideoFrameAvailable = 1;
}
// Audio packet data consumption.
// We need to make sure that the frame audio data which will be consumed matches the user selected stream.
if (packet.stream_index == self.selectedAudioStreamIndex) {
[self.audioPacketQueueLock lock];
[self.audioPacketQueue addObject:[NSMutableData dataWithBytes:&packet length:sizeof(packet)]];
[self.audioPacketQueueLock unlock];
}
}
[self.packetQueueLock unlock];
return isVideoFrameAvailable!=0;
}
- (void)cleanup
{
NSLog(@"%s", __PRETTY_FUNCTION__);
self.shouldTerminateStreams = YES;
self.whatIsHappeningSTR = @"";
}
- (void)terminate
{
avformat_close_input(&_sessionContext);
}
@end
Hope this helps anyone. Thank you for reading and contributing.
I am using the following code to check whether the iPhone silent switch is ON or OFF:
if (self)
{
self.detector = [SharkfoodMuteSwitchDetector shared];
CheckInViewController* sself = self;
self.detector.silentNotify = ^(BOOL silent)
{
[sself.silentSwitch setOn:silent animated:YES];
};
}
It works fine in iOS 6 and below, but in iOS 7 it always gives a TRUE value. Can anyone please tell me how to resolve this issue?
Thanks in advance.
It doesn't work in iOS 7, and it never really worked in iOS 6 either, once you look at why it doesn't work in iOS 7. This solution is based on the same code, so all credit to the original author.
Keep mute.caf from your SharkfoodMuteSwitchDetector.
Create a new class, called HASilentSwitchDetector (or whatever), or replace the code in SharkfoodMuteSwitchDetector.
In the header file:
#import <AudioToolbox/AudioToolbox.h>
typedef void(^HASilentSwitchDetectorBlock)(BOOL success, BOOL silent);
@interface HASilentSwitchDetector : NSObject
+ (void)ifMute:(HASilentSwitchDetectorBlock)then;
@end
In the implementation file:
#import "HASilentSwitchDetector.h"
void MuteSoundPlaybackComplete(SystemSoundID ssID, void *clientData)
{
//Completion
NSDictionary *soundCompletion = CFBridgingRelease(clientData);
//Mute
NSTimeInterval interval = [soundCompletion[@"interval"] doubleValue];
NSTimeInterval elapsed = [NSDate timeIntervalSinceReferenceDate] - interval;
BOOL isMute = elapsed < 0.2; // mute.caf is .2s long...
//Then
HASilentSwitchDetectorBlock then = soundCompletion[@"then"];
then(YES, isMute);
//Cleanup
SystemSoundID soundID = [soundCompletion[@"soundID"] integerValue];
AudioServicesRemoveSystemSoundCompletion(soundID);
AudioServicesDisposeSystemSoundID(soundID);
}
@implementation HASilentSwitchDetector
+ (void)ifMute:(HASilentSwitchDetectorBlock)then
{
//Check
if ( !then ) {
return;
}
//Create
NSURL *url = [[NSBundle mainBundle] URLForResource:@"mute" withExtension:@"caf"];
SystemSoundID soundID;
if ( AudioServicesCreateSystemSoundID((__bridge CFURLRef)url, &soundID) == kAudioServicesNoError ) {
//UI Sound
UInt32 yes = 1;
AudioServicesSetProperty(kAudioServicesPropertyIsUISound, sizeof(soundID), &soundID,sizeof(yes), &yes);
//Callback
NSDictionary *soundCompletion = @{@"then" : [then copy], @"soundID" : @(soundID), @"interval" : @([NSDate timeIntervalSinceReferenceDate])};
AudioServicesAddSystemSoundCompletion(soundID, CFRunLoopGetMain(), kCFRunLoopDefaultMode, MuteSoundPlaybackComplete, (void *)CFBridgingRetain(soundCompletion));
//Play
AudioServicesPlaySystemSound(soundID);
} else {
//Fail
then(NO, NO);
}
}
@end
Use like so:
[HASilentSwitchDetector ifMute:^(BOOL success, BOOL silent) {
if ( success ) {
if ( ![[NSUserDefaults standardUserDefaults] boolForKey:kHasShownMuteWarning] && silent ) {
[[NSUserDefaults standardUserDefaults] setBool:YES forKey:kHasShownMuteWarning];
[[[UIAlertView alloc] initWithTitle:[@"Mute Warning" localized] message:[NSString stringWithFormat:[@"This %@'s mute switch is on. To ensure your alarm will be audible, unmute your device." localized], [([[UIDevice currentDevice] isiPad] ? @"iPad" : @"iPhone") localized]] delegate:nil cancelButtonTitle:nil otherButtonTitles:[@"Ok" localized], nil] show];
}
}
}];