Send ReplayKit audio samples (not microphone) over WebRTC - iOS

I am able to send ReplayKit video samples to the remote peer, but with no audio. What should I use instead of RTCVideoFrame for the audio? Is there a way I can send the audio and video data together?
Below is the snippet:
var peerConnectionFactory: RTCPeerConnectionFactory?
var localVideoSource: RTCVideoSource?
var videoCapturer: RTCVideoCapturer?

func setupVideoCapturer() {
    // localVideoSource and videoCapturer are created once and reused for every frame
    localVideoSource = self.peerConnectionFactory!.videoSource()
    videoCapturer = RTCVideoCapturer()
    // localVideoSource!.capturer(videoCapturer!, didCapture: videoFrame!)
    let videoTrack: RTCVideoTrack = self.peerConnectionFactory!.videoTrack(with: localVideoSource!, trackId: "100")
    let mediaStream: RTCMediaStream = (self.peerConnectionFactory?.mediaStream(withStreamId: "1"))!
    mediaStream.addVideoTrack(videoTrack)
    self.newPeerConnection!.add(mediaStream)
}
override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    switch sampleBufferType {
    case RPSampleBufferType.video:
        // get the CVPixelBuffer out of the sample buffer
        let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        // create the RTCVideoFrame
        var videoFrame: RTCVideoFrame?
        let timestamp = NSDate().timeIntervalSince1970 * 1000
        videoFrame = RTCVideoFrame(pixelBuffer: pixelBuffer, rotation: RTCVideoRotation._0, timeStampNs: Int64(timestamp))
        // hand the video frame to WebRTC
        localVideoSource!.capturer(videoCapturer!, didCapture: videoFrame!)
    default:
        break
    }
}
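There is no audio counterpart to RTCVideoFrame that you can push from processSampleBuffer in the stock WebRTC iOS framework: RTCAudioSource does not accept CMSampleBuffers, and audio capture normally goes through WebRTC's built-in audio device module (i.e. the microphone). What the standard API does let you do is add an RTCAudioTrack next to the video track so that audio and video travel in the same stream. Below is a minimal sketch, assuming the same peerConnectionFactory as above and a hypothetical localMediaStream property holding the stream created in setupVideoCapturer(). As far as I know, pushing ReplayKit's .audioApp buffers directly requires a custom WebRTC build that exposes its audio device module; the prebuilt binaries do not.

// Sketch only: adds a microphone-backed audio track to the existing stream.
// "localMediaStream" is a hypothetical property holding the RTCMediaStream
// created in setupVideoCapturer(); the track id "101" is an arbitrary example.
func setupAudioTrack() {
    let constraints = RTCMediaConstraints(mandatoryConstraints: nil, optionalConstraints: nil)
    let audioSource = self.peerConnectionFactory!.audioSource(with: constraints)
    let audioTrack = self.peerConnectionFactory!.audioTrack(with: audioSource, trackId: "101")
    // add the track before the stream is handed to the peer connection
    // (or renegotiate the offer afterwards)
    localMediaStream.addAudioTrack(audioTrack)
}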

Related

Using AVKit to detect luminosity

I am working on an app using SwiftUI that leverages the device camera to detect luminosity, as described in the top answer of this post. The captureOutput(_:didOutput:from:) function from that answer is used to calculate luminosity. According to the Apple docs, this function is intended to notify a delegate that a new video frame was written, so I have placed it in a VideoDelegate class. This delegate is then set in a VideoStream class that handles the logic of asking for permissions and setting up an AVCaptureSession. My question is: how do I access the luminosity value calculated within the delegate inside my SwiftUI view?
struct ContentView: View {
    @StateObject var videoStream = VideoStream()

    var body: some View {
        Text("\(videoStream.luminosityReading) ?? Detecting...")
            .padding()
    }
}
class VideoStream: ObservableObject {
    @Published var luminosityReading: Double = 0.0 // TODO get luminosity from VideoDelegate

    var session: AVCaptureSession!

    init() {
        authorizeCapture()
    }

    func authorizeCapture() {
        // permission logic and call to beginCapture()
    }

    func beginCapture() {
        session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = bestDevice() // func definition omitted for readability
        guard
            let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
            session.canAddInput(videoDeviceInput)
        else {
            print("Camera selection failed")
            return
        }
        session.addInput(videoDeviceInput)

        let videoOutput = AVCaptureVideoDataOutput()
        guard session.canAddOutput(videoOutput) else {
            print("Error creating video output")
            return
        }
        session.sessionPreset = .high
        session.addOutput(videoOutput)

        let queue = DispatchQueue(label: "VideoFrameQueue")
        let delegate = VideoDelegate()
        videoOutput.setSampleBufferDelegate(delegate, queue: queue)
        session.commitConfiguration()
        session.startRunning()
    }
}
class VideoDelegate: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Retrieve the EXIF data of the camera frame buffer
        let rawMetadata = CMCopyDictionaryOfAttachments(allocator: nil, target: sampleBuffer, attachmentMode: CMAttachmentMode(kCMAttachmentMode_ShouldPropagate))
        let metadata = CFDictionaryCreateMutableCopy(nil, 0, rawMetadata) as NSMutableDictionary
        let exifData = metadata.value(forKey: "{Exif}") as? NSMutableDictionary
        let FNumber: Double = exifData?["FNumber"] as! Double
        let ExposureTime: Double = exifData?["ExposureTime"] as! Double
        let ISOSpeedRatingsArray = exifData!["ISOSpeedRatings"] as? NSArray
        let ISOSpeedRatings: Double = ISOSpeedRatingsArray![0] as! Double
        let CalibrationConstant: Double = 50

        // Calculate the luminosity
        let luminosity: Double = (CalibrationConstant * FNumber * FNumber) / (ExposureTime * ISOSpeedRatings)
        // how to pass value of luminosity to `VideoStream`?
    }
}
As discussed in the comments, the lowest friction option would be to have VideoStream conform to AVCaptureVideoDataOutputSampleBufferDelegate and implement the delegate method there.
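A minimal sketch of that approach, assuming the same session and device setup as in the question (elided parts are marked with comments). Note that VideoStream has to subclass NSObject to adopt the delegate protocol, and the reading is published on the main thread so the SwiftUI view updates:

import AVFoundation
import Combine

class VideoStream: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    var session: AVCaptureSession!

    func beginCapture() {
        session = AVCaptureSession()
        session.beginConfiguration()
        // ... device input setup as in the question ...
        let videoOutput = AVCaptureVideoDataOutput()
        session.addOutput(videoOutput)
        // VideoStream itself is now the sample buffer delegate
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoFrameQueue"))
        session.commitConfiguration()
        session.startRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // ... EXIF extraction and luminosity calculation as in VideoDelegate ...
        let luminosity: Double = 0.0 // placeholder for the value computed above
        DispatchQueue.main.async {
            // publish on the main thread so the Text view refreshes
            self.luminosityReading = luminosity
        }
    }
}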

AVAudioEngine player node excessive delay

I am trying to use AVAudioEngine to listen to mic samples and play them back simultaneously through external speakers or headphones (assuming they are attached to the iOS device). I tried the following using AVAudioPlayerNode and it works, but there is too much delay in the audio playback. Is there a way to hear the sound in real time, without delay?
var engine: AVAudioEngine!
var playerNode: AVAudioPlayerNode!
var mixer: AVAudioMixerNode!
var audioEngineRunning = false

public func setupAudioEngine() {
    self.engine = AVAudioEngine()
    let input = engine.inputNode
    let format = input.inputFormat(forBus: 0)

    playerNode = AVAudioPlayerNode()
    engine.attach(playerNode)
    self.mixer = engine.mainMixerNode
    engine.connect(self.playerNode, to: self.mixer, format: playerNode.outputFormat(forBus: 0))

    engine.inputNode.installTap(onBus: 0, bufferSize: 4096, format: format, block: { buffer, time in
        self.playerNode.scheduleBuffer(buffer, completionHandler: nil)
    })

    do {
        engine.prepare()
        try self.engine.start()
        audioEngineRunning = true
        self.playerNode.play()
    } catch {
        print("error couldn't start engine")
        audioEngineRunning = false
    }
}
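For reference, a large part of the delay comes from the tap itself: a 4096-frame buffer at 44.1 kHz is roughly 93 ms of audio that has to be collected before scheduleBuffer even runs, on top of the hardware I/O buffer. If plain monitoring (mic straight to the output, with no per-buffer processing) is acceptable, a lower-latency alternative is to connect the input node directly to the main mixer and ask the audio session for a small I/O buffer. This is a sketch under those assumptions, not a drop-in fix for the code above:

import AVFoundation

// Sketch only: direct input-to-mixer monitoring with a small I/O buffer.
// Requires an active .playAndRecord audio session and microphone permission.
func setupLowLatencyMonitoring() throws {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(.playAndRecord, options: [.defaultToSpeaker, .allowBluetooth])
    try session.setPreferredIOBufferDuration(0.005) // ask for ~5 ms buffers
    try session.setActive(true)

    let engine = AVAudioEngine()
    let input = engine.inputNode
    let format = input.inputFormat(forBus: 0)

    // Route the microphone straight into the output mixer; no tap and no player
    // node, so no extra buffering beyond the hardware I/O buffer.
    engine.connect(input, to: engine.mainMixerNode, format: format)

    engine.prepare()
    try engine.start()
}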

How to send CMSampleBuffer to WebRTC?

So I am using ReplayKit to try to stream my phone screen to a web browser.
override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    //if source!.isSocketConnected {
    switch sampleBufferType {
    case RPSampleBufferType.video:
        // Handle video sample buffer
        break
    case RPSampleBufferType.audioApp:
        // Handle audio sample buffer for app audio
        break
    case RPSampleBufferType.audioMic:
        // Handle audio sample buffer for mic audio
        break
    @unknown default:
        break
    }
}
So how do we send that data to WebRTC?
In order to use WebRTC, I learned that you need a signaling server.
Is it possible to start a signaling server on your mobile device, just like an HTTP server?
Hi Sam, WebRTC can consume your frames as RTCVideoFrames, but it works with CVPixelBuffer. So you first have to convert your CMSampleBuffer to a CVPixelBuffer, and then feed those frames into your localVideoSource with an RTCVideoCapturer. I solved a similar problem with AVCaptureVideoDataOutputSampleBufferDelegate, which produces CMSampleBuffers just like ReplayKit does. I hope the code below helps you solve your problem.
private var videoCapturer: RTCVideoCapturer?
private var localVideoSource = RTCClient.factory.videoSource()
private var localVideoTrack: RTCVideoTrack?
private var remoteVideoTrack: RTCVideoTrack?
private var peerConnection: RTCPeerConnection? = nil

public static let factory: RTCPeerConnectionFactory = {
    RTCInitializeSSL()
    let videoEncoderFactory = RTCDefaultVideoEncoderFactory()
    let videoDecoderFactory = RTCDefaultVideoDecoderFactory()
    return RTCPeerConnectionFactory(encoderFactory: videoEncoderFactory, decoderFactory: videoDecoderFactory)
}()

extension RTCClient: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("didOutput: \(sampleBuffer)")
        guard let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let timeStampNs: Int64 = Int64(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) * 1_000_000_000)
        let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: imageBuffer)
        let rtcVideoFrame = RTCVideoFrame(buffer: rtcPixelBuffer, rotation: ._90, timeStampNs: timeStampNs)
        self.localVideoSource.capturer(videoCapturer!, didCapture: rtcVideoFrame)
    }
}
You also need a configuration like this for the media senders:
func createMediaSenders() {
    let streamId = "stream"
    let videoTrack = self.createVideoTrack()
    self.localVideoTrack = videoTrack
    self.peerConnection!.add(videoTrack, streamIds: [streamId])
    self.remoteVideoTrack = self.peerConnection!.transceivers.first { $0.mediaType == .video }?.receiver.track as? RTCVideoTrack
}

private func createVideoTrack() -> RTCVideoTrack {
    let videoTrack = RTCClient.factory.videoTrack(with: self.localVideoSource, trackId: "video0")
    return videoTrack
}
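Since the question is about ReplayKit rather than an AVCaptureSession, the same conversion can be done directly in the broadcast extension's processSampleBuffer. A minimal sketch, assuming the handler has access to the same localVideoSource and videoCapturer shown above (for example through a shared RTCClient instance):

override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    switch sampleBufferType {
    case .video:
        // Same conversion as in captureOutput: CMSampleBuffer -> CVPixelBuffer -> RTCVideoFrame
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let timeStampNs = Int64(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) * 1_000_000_000)
        let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: imageBuffer)
        let rtcVideoFrame = RTCVideoFrame(buffer: rtcPixelBuffer, rotation: ._0, timeStampNs: timeStampNs)
        localVideoSource.capturer(videoCapturer!, didCapture: rtcVideoFrame)
    case .audioApp, .audioMic:
        // Audio buffers cannot be pushed through RTCVideoSource; they need separate handling.
        break
    @unknown default:
        break
    }
}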

AVAssetWriter async video and audio after calling broadcastPaused()

I am trying to record a video with sound from the device screen using ReplayKit and RPBroadcastSampleHandler.
When I record it just using "start broadcast" and "stop", the result I get is great.
But if I try to pause the recording (by tapping the red status bar) I get problems. The video and audio end up with different lengths (the audio is shorter but contains everything I need), and they start drifting out of sync from the moment the status bar is tapped (iOS 14). The audio plays fine, but the video freezes when the status bar is tapped and only continues once the modal window is closed. As a result, the end of the video has no audio.
Here is my code:
1. All the class fields I have:
class SampleHandler: RPBroadcastSampleHandler {
    private let videoService = VideoService()
    private let audioService = AudioService()
    private var isRecording = false
    private let lock = NSLock()
    private var finishCalled = false

    private var videoWriter: AVAssetWriter!
    private var videoWriterInput: AVAssetWriterInput!
    private var microphoneWriterInput: AVAssetWriterInput!
    private var sessionBeginAtSourceTime: CMTime!
2. Configuration when capturing starts:
override func broadcastStarted(withSetupInfo setupInfo: [String: NSObject]?) {
    guard !isRecording else { return }
    isRecording = true
    BroadcastData.clear()
    BroadcastData.startVideoDate = Date()
    BroadcastData.status = .writing
    sessionBeginAtSourceTime = nil
    configurateVideoWriter()
}

private func configurateVideoWriter() {
    let outputFileLocation = videoService.getVideoFileLocation()
    videoWriter = try? AVAssetWriter(outputURL: outputFileLocation, fileType: AVFileType.mov)
    configurateVideoWriterInput()
    configurateMicrophoneWriterInput()
    if videoWriter.canAdd(videoWriterInput) { videoWriter.add(videoWriterInput) }
    if videoWriter.canAdd(microphoneWriterInput) { videoWriter.add(microphoneWriterInput) }
    videoWriter.startWriting()
}

private func configurateVideoWriterInput() {
    let RESOLUTION_COEF: CGFloat = 16
    let naturalWidth = UIScreen.main.bounds.width
    let naturalHeight = UIScreen.main.bounds.height
    let width = naturalWidth - naturalWidth.truncatingRemainder(dividingBy: RESOLUTION_COEF)
    let height = naturalHeight - naturalHeight.truncatingRemainder(dividingBy: RESOLUTION_COEF)
    let videoSettings: [String: Any] = [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: width,
        AVVideoHeightKey: height
    ]
    videoWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
    videoWriterInput.expectsMediaDataInRealTime = true
}

private func configurateMicrophoneWriterInput() {
    let audioOutputSettings: [String: Any] = [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: 1,
        AVSampleRateKey: 44100.0,
        AVEncoderBitRateKey: 96000
    ]
    microphoneWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioOutputSettings)
    microphoneWriterInput.expectsMediaDataInRealTime = true
}
3. The write process:
override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    guard isRecording && videoWriter?.status == .writing else { return }
    if BroadcastData.status != .writing {
        isRecording = false
        finishBroadCast()
        return
    }
    if sessionBeginAtSourceTime == nil {
        sessionBeginAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        videoWriter.startSession(atSourceTime: sessionBeginAtSourceTime!)
    }
    switch sampleBufferType {
    case .video:
        if videoWriterInput.isReadyForMoreMediaData {
            videoWriterInput.append(sampleBuffer)
        }
    case .audioMic:
        if microphoneWriterInput.isReadyForMoreMediaData {
            microphoneWriterInput.append(sampleBuffer)
        }
    case .audioApp:
        break
    @unknown default:
        print("unknown")
    }
}
4. Pause and resume:
override func broadcastPaused() {
    super.broadcastPaused()
}

override func broadcastResumed() {
    super.broadcastResumed()
}
Pausing and resuming the recording creates a gap in the video presentation timestamps and a discontinuity in the audio, which I believe explains your symptoms.
What you need to do is measure how long the recording was paused for, possibly using the sample buffer timestamps, and then subtract that offset from the presentation timestamps of all subsequent CMSampleBuffers that you process. CMSampleBufferCreateCopyWithNewTiming() can help you with this.
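A minimal sketch of that retiming, assuming a hypothetical pauseOffset property that accumulates the total time spent paused (for example, by comparing the last presentation timestamp seen before broadcastPaused() with the first one seen after broadcastResumed()). Every buffer would go through this helper before being appended to the writer inputs:

// Hypothetical helper, not from the original post: returns a copy of the sample
// buffer with all timestamps shifted back by `offset` (the total time spent paused).
private func retimed(_ sampleBuffer: CMSampleBuffer, by offset: CMTime) -> CMSampleBuffer? {
    // Find out how many timing entries the buffer has, then fetch them.
    var count: CMItemCount = 0
    CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, entryCount: 0, arrayToFill: nil, entriesNeededOut: &count)
    guard count > 0 else { return sampleBuffer }
    var timingInfo = [CMSampleTimingInfo](repeating: CMSampleTimingInfo(), count: count)
    CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, entryCount: count, arrayToFill: &timingInfo, entriesNeededOut: &count)

    // Shift presentation (and decode, if present) timestamps back by the pause offset.
    for i in 0..<count {
        timingInfo[i].presentationTimeStamp = CMTimeSubtract(timingInfo[i].presentationTimeStamp, offset)
        if timingInfo[i].decodeTimeStamp.isValid {
            timingInfo[i].decodeTimeStamp = CMTimeSubtract(timingInfo[i].decodeTimeStamp, offset)
        }
    }

    var adjusted: CMSampleBuffer?
    CMSampleBufferCreateCopyWithNewTiming(allocator: kCFAllocatorDefault,
                                          sampleBuffer: sampleBuffer,
                                          sampleTimingEntryCount: count,
                                          sampleTimingArray: &timingInfo,
                                          sampleBufferOut: &adjusted)
    return adjusted
}

Appending retimed(sampleBuffer, by: pauseOffset) instead of the raw buffer keeps audio and video on one continuous timeline, without the gap introduced by the pause.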

Using WebRTC to send an iOS device's screen capture using ReplayKit

We would like to use WebRTC to send an iOS device's screen capture using ReplayKit.
ReplayKit has a processSampleBuffer callback which gives a CMSampleBuffer.
But here is where we are stuck: we can't seem to get the CMSampleBuffer sent to the connected peer.
We have tried to create a pixelBuffer from the sampleBuffer and then create an RTCVideoFrame.
We also extracted the RTCVideoSource from the RTCPeerConnectionFactory and then used an RTCVideoCapturer to stream it to the localVideoSource.
Any idea what we are doing wrong?
var peerConnectionFactory: RTCPeerConnectionFactory?

override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    switch sampleBufferType {
    case RPSampleBufferType.video:
        // create the CVPixelBuffer
        let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        // create the RTCVideoFrame
        var videoFrame: RTCVideoFrame?
        let timestamp = NSDate().timeIntervalSince1970 * 1000
        videoFrame = RTCVideoFrame(pixelBuffer: pixelBuffer, rotation: RTCVideoRotation._0, timeStampNs: Int64(timestamp))
        // connect the video frames to the WebRTC
        let localVideoSource = self.peerConnectionFactory!.videoSource()
        let videoCapturer = RTCVideoCapturer()
        localVideoSource.capturer(videoCapturer, didCapture: videoFrame!)
        let videoTrack: RTCVideoTrack = self.peerConnectionFactory!.videoTrack(with: localVideoSource, trackId: "100")
        let mediaStream: RTCMediaStream = (self.peerConnectionFactory?.mediaStream(withStreamId: "1"))!
        mediaStream.addVideoTrack(videoTrack)
        self.newPeerConnection!.add(mediaStream)
    default:
        break
    }
}
This is a good approach. You just have to feed the RTCVideoFrame to the video source inside processSampleBuffer; all the other objects should be initialized once, outside that method. For a better understanding, here is a snippet.
var peerConnectionFactory: RTCPeerConnectionFactory?
var localVideoSource: RTCVideoSource?
var videoCapturer: RTCVideoCapturer?

func setupVideoCapturer() {
    // localVideoSource and videoCapturer are created once and reused for every frame
    localVideoSource = self.peerConnectionFactory!.videoSource()
    videoCapturer = RTCVideoCapturer()
    // localVideoSource!.capturer(videoCapturer!, didCapture: videoFrame!)
    let videoTrack: RTCVideoTrack = self.peerConnectionFactory!.videoTrack(with: localVideoSource!, trackId: "100")
    let mediaStream: RTCMediaStream = (self.peerConnectionFactory?.mediaStream(withStreamId: "1"))!
    mediaStream.addVideoTrack(videoTrack)
    self.newPeerConnection!.add(mediaStream)
}

override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    switch sampleBufferType {
    case RPSampleBufferType.video:
        // create the CVPixelBuffer
        let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        // create the RTCVideoFrame
        var videoFrame: RTCVideoFrame?
        let timestamp = NSDate().timeIntervalSince1970 * 1000
        videoFrame = RTCVideoFrame(pixelBuffer: pixelBuffer, rotation: RTCVideoRotation._0, timeStampNs: Int64(timestamp))
        // connect the video frames to the WebRTC
        localVideoSource!.capturer(videoCapturer!, didCapture: videoFrame!)
    default:
        break
    }
}
Hope this will help you.
