AudioKit playback cracks - audiokit

I want to analyze the microphone input frequency and then play the correct note that is closest to the detected frequency. I did that with AudioKit.
This works, but since I implemented AudioKit for the frequency feature, the sound that plays after the frequency detection sometimes crackles during playback. That started after I added AudioKit. Everything was fine before that...
var mic: AKMicrophone!
var tracker: AKFrequencyTracker!
var silence: AKBooster!
func initFrequencyTracker() {
AKSettings.channelCount = 2
AKSettings.audioInputEnabled = true
AKSettings.defaultToSpeaker = true
AKSettings.allowAirPlay = true
AKSettings.useBluetooth = true
AKSettings.allowHapticsAndSystemSoundsDuringRecording = true
mic = AKMicrophone()
tracker = AKFrequencyTracker(mic)
silence = AKBooster(tracker, gain: 0)
}
func deinitFrequencyTracker() {
AKSettings.audioInputEnabled = false
plotTimer.invalidate()
do {
try AudioKit.stop()
AudioKit.output = nil
} catch {
print(error)
}
}
func initPlotTimer() {
AudioKit.output = silence
do {
try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker, .allowBluetooth, .allowAirPlay, .allowBluetoothA2DP])
try AudioKit.start()
} catch {
AKLog("AudioKit did not start!")
}
setupPlot()
plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}
func setupPlot() {
let plot = AKNodeOutputPlot(mic, frame: audioInputPlot.bounds)
plot.translatesAutoresizingMaskIntoConstraints = false
plot.alpha = 0.3
plot.plotType = .rolling
plot.shouldFill = true
plot.shouldCenterYAxis = false
plot.shouldMirror = true
plot.color = UIColor(named: uiFarbe)
audioInputPlot.addSubview(plot)
// Pin the AKNodeOutputPlot to the audioInputPlot
var constraints = [plot.leadingAnchor.constraint(equalTo: audioInputPlot.leadingAnchor)]
constraints.append(plot.trailingAnchor.constraint(equalTo: audioInputPlot.trailingAnchor))
constraints.append(plot.topAnchor.constraint(equalTo: audioInputPlot.topAnchor))
constraints.append(plot.bottomAnchor.constraint(equalTo: audioInputPlot.bottomAnchor))
constraints.forEach { $0.isActive = true }
}
@objc func updatePlotUI() {
if tracker.amplitude > 0.3 {
let trackerFrequency = Float(tracker.frequency)
guard trackerFrequency < 7_000 else {
// This is a bit of a hack because modern MacBooks give super high frequencies
return
}
var frequency = trackerFrequency
while frequency > Float(noteFrequencies[noteFrequencies.count - 1]) {
frequency /= 2.0
}
while frequency < Float(noteFrequencies[0]) {
frequency *= 2.0
}
var minDistance: Float = 10_000.0
var index = 0
for i in 0..<noteFrequencies.count {
let distance = fabsf(Float(noteFrequencies[i]) - frequency)
if distance < minDistance {
index = i
minDistance = distance
}
print(minDistance, distance)
}
// let octave = Int(log2f(trackerFrequency / frequency))
frequencyLabel.text = String(format: "%0.1f", tracker.frequency)
if frequencyTranspose(note: notesToTanspose[index]) != droneLabel.text {
momentaneNote = frequencyTranspose(note: notesToTanspose[index])
droneLabel.text = momentaneNote
stopSinglePlayer()
DispatchQueue.main.asyncAfter(deadline: .now() + 0.03, execute: {
self.prepareSinglePlayerFirstForStart(note: self.momentaneNote)
self.startSinglePlayer()
})
}
}
}
func frequencyTranspose(note: String) -> String {
var indexNote = notesToTanspose.firstIndex(of: note)!
let chosenInstrument = UserDefaults.standard.object(forKey: "whichInstrument") as! String
if chosenInstrument == "Bb" {
if indexNote + 2 >= notesToTanspose.count {
indexNote -= 12
}
return notesToTanspose[indexNote + 2]
} else if chosenInstrument == "Eb" {
if indexNote - 3 < 0 {
indexNote += 12
}
return notesToTanspose[indexNote - 3]
} else {
return note
}
}

It appears that your implementation can be improved slightly by putting the multithreading principles of iOS into practice. Now, I'm not an expert on the subject, but let's look at the statement: "the sound which plays after the frequency detection cracks sometimes during playback".
I'd point out that the "frequency" of the crackle is random and unpredictable, and that it happens while the computation is running.
So, move code that doesn't need to run on the main thread to a background thread (https://developer.apple.com/documentation/DISPATCH).
While refactoring, you can test this theory by changing how often your Timer's callback fires: reduce the interval to 0.05, for example, so it runs more often and the crackling should get worse; increase it to, say, 0.2 and you'll probably hear fewer random crackles.
Now, this is easier said than done where concurrency is concerned, but that's what you need to improve.
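Here is a minimal sketch of how updatePlotUI from the code above could be split so the heavy lifting runs off the main thread. It assumes the same properties and helpers shown in the question (tracker, noteFrequencies, notesToTanspose, frequencyTranspose, the labels and the player methods) and is a starting point rather than a drop-in replacement:
@objc func updatePlotUI() {
    // Read the tracker on the main thread, then hand the math to a background queue.
    let amplitude = tracker.amplitude
    let trackedFrequency = tracker.frequency
    guard amplitude > 0.3, trackedFrequency < 7_000 else { return }
    DispatchQueue.global(qos: .userInitiated).async { [weak self] in
        guard let self = self else { return }
        // Octave folding and nearest-note search, same logic as before,
        // but no longer blocking the main thread.
        var frequency = Float(trackedFrequency)
        while frequency > Float(self.noteFrequencies[self.noteFrequencies.count - 1]) {
            frequency /= 2.0
        }
        while frequency < Float(self.noteFrequencies[0]) {
            frequency *= 2.0
        }
        var minDistance: Float = 10_000.0
        var index = 0
        for i in 0..<self.noteFrequencies.count {
            let distance = fabsf(Float(self.noteFrequencies[i]) - frequency)
            if distance < minDistance {
                index = i
                minDistance = distance
            }
        }
        let newNote = self.frequencyTranspose(note: self.notesToTanspose[index])
        // Only UI updates and player control go back to the main queue.
        DispatchQueue.main.async {
            self.frequencyLabel.text = String(format: "%0.1f", trackedFrequency)
            if newNote != self.droneLabel.text {
                self.momentaneNote = newNote
                self.droneLabel.text = newNote
                self.stopSinglePlayer()
                DispatchQueue.main.asyncAfter(deadline: .now() + 0.03) {
                    self.prepareSinglePlayerFirstForStart(note: self.momentaneNote)
                    self.startSinglePlayer()
                }
            }
        }
    }
}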

Related

iOS AudioKit High Pass Filter

I am trying to detect pitch from the iOS microphone with AudioKit; here is the code:
init() {
guard let input = engine.input else {
fatalError()
}
mic = input
filter = HighPassFilter(mic, cutoffFrequency: 200, resonance: 40)
silence = Fader(filter, gain: 0)
tracker = PitchTap(silence) { pitch , amp in
DispatchQueue.main.async {
print(pitch[0], amp[0])
self.update(pitch[0], amp[0])
}
}
engine.output = filter
}
func start() {
recordFrequency = []
do {
try engine.start()
tracker.start()
} catch let err {
Log(err)
}
}
Because there is always some detection (frequencies between 20 and 200) when I don't make any sound, I added a high pass filter to cut out sound below 40 dB and below 200 Hz, but it doesn't seem to work. What should I do?
Thanks.
It looks like you just have some connection point issues. Try putting the tracker before you make the signal silent:
filter = HighPassFilter(mic, cutoffFrequency: 200, resonance: 40)
tracker = PitchTap(filter) { pitch , amp in
DispatchQueue.main.async {
print(pitch[0], amp[0])
self.update(pitch[0], amp[0])
}
}
silence = Fader(tracker, gain: 0)
engine.output = filter
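Also, even with the filter in place, PitchTap may keep reporting pitches for low-level background noise. On top of the rearranged chain above, one option is to gate the callback on amplitude; this sketch assumes the update(_:_:) method from the question, and the 0.05 threshold is just an illustrative value to tune:
tracker = PitchTap(filter) { pitch, amp in
    DispatchQueue.main.async {
        // Hypothetical noise gate: ignore readings whose amplitude is below a
        // chosen threshold so background noise doesn't register as a pitch.
        let amplitudeThreshold: Float = 0.05
        guard amp[0] > amplitudeThreshold else { return }
        self.update(pitch[0], amp[0])
    }
}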

Core Plot animation of a sequence of plots

I have a number of plots, which depict (x,y) data at different time intervals, and I wish to show them in a sequence, one after the other (like a GIF). My approach was to generate all the plots, initially hide them all, and use a Timer.scheduledTimer to unhide the current plot and hide the previous one each time the timer fires. Between each hide/unhide the graph is blank for longer than each plot is actually shown. Each plot has 32x32 data points. How can I speed this up so that I never see a blank graph? Another approach was to fade out one plot while introducing the next, but I see the same effect.
@objc func tapAnimationButton(_ sender: Any) {
isAnimating = !isAnimating
plotSpacesUserInteraction = !plotSpacesUserInteraction
if let _animationButton = animationButton {
_animationButton.isSelected = isAnimating
previousAnimatedPlot = nil
if isAnimating {
animationCounter = 0
for i in 0..<plotDetails.count {
// fieldsplots[i].isHidden = true
fieldsplots[i].opacity = 0.0
}
animationTimer = Timer.scheduledTimer(timeInterval: 0.5, target: self, selector: #selector(animateGraph(_:)), userInfo: nil, repeats: true)
if let _animationTimer = animationTimer {
animateGraph(_animationTimer)
}
}
else {
animationTimer?.invalidate()
animationTimer = nil
}
}
}
@objc func animateGraph(_ timer: Timer) {
if animationSeconds > 120.0 {
timer.invalidate()
self.animationTimer = nil
animationSeconds = 0.0
animationCounter = 0
previousAnimatedPlot = nil
}
else {
if let currentAnimatedPlot = self.graph.plot(at: animationCounter) {
// previousAnimatedPlot?.isHidden = true
// currentAnimatedPlot.isHidden = false
previousAnimatedPlot?.opacity = 1.0
let fadeOutAnimation = CABasicAnimation(keyPath: "opacity")
fadeOutAnimation.duration = 0.1
fadeOutAnimation.isRemovedOnCompletion = false
fadeOutAnimation.fillMode = CAMediaTimingFillMode.forwards
fadeOutAnimation.toValue = Float(0.0)
previousAnimatedPlot?.add(fadeOutAnimation, forKey: "animateOpacity")
currentAnimatedPlot.opacity = 0.0
let fadeInAnimation = CABasicAnimation(keyPath: "opacity")
fadeInAnimation.duration = 0.1
fadeInAnimation.isRemovedOnCompletion = false
fadeInAnimation.fillMode = CAMediaTimingFillMode.forwards
fadeInAnimation.toValue = Float(1.0)
currentAnimatedPlot.add(fadeInAnimation, forKey: "animateOpacity")
previousAnimatedPlot = currentAnimatedPlot
}
animationSeconds += 0.5
animationCounter += 1;
if animationCounter >= plotDetails.count {
animationCounter = 0
}
}
}
The fade in/out method actually works... not sure how I missed that.
As an add-on to the answer: in order to produce a GIF image, one needs to hide/unhide the plots in sequence:
for currentPlot in self.graph.allPlots() {
currentPlot.isHidden = true
}
var images: [UIImage] = []
var previousPlot: CPTPlot?
for currentPlot in self.graph.allPlots() {
if let _previousPlot = previousPlot {
_previousPlot.isHidden = true
}
currentPlot.isHidden = false
if let image = graph.imageOfLayer() {
images.append(image)
}
previousPlot = currentPlot
}
for currentPlot in self.graph.allPlots() {
currentPlot.isHidden = false
}
if images.count > 2 {
TwoDPlot_Utilities.GIFExport(with: images, plot: thisplot, plotIndex: plotIndex, frameDelay: 0.5)
}

AudioKit output changes to ear speakers

I implemented the AudioKit "MICROPHONE ANALYSIS" example https://audiokit.io/examples/MicrophoneAnalysis/ in my app.
I want to analyze the microphone input frequency and then play the correct note that is closest to the detected frequency.
Normally the sound output is the speaker or a Bluetooth device connected to my iPhone, but after implementing the "MICROPHONE ANALYSIS" example the output switched to the tiny speaker at the top of the iPhone that is normally used for phone calls.
How can I switch back to the "normal" speaker or to the connected Bluetooth device, like before?
var mic: AKMicrophone!
var tracker: AKFrequencyTracker!
var silence: AKBooster!
func initFrequencyTracker() {
AKSettings.audioInputEnabled = true
mic = AKMicrophone()
tracker = AKFrequencyTracker(mic)
silence = AKBooster(tracker, gain: 0)
}
func deinitFrequencyTracker() {
plotTimer.invalidate()
do {
try AudioKit.stop()
AudioKit.output = nil
} catch {
print(error)
}
}
func initPlotTimer() {
AudioKit.output = silence
do {
try AudioKit.start()
} catch {
AKLog("AudioKit did not start!")
}
setupPlot()
plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}
func setupPlot() {
let plot = AKNodeOutputPlot(mic, frame: audioInputPlot.bounds)
plot.translatesAutoresizingMaskIntoConstraints = false
plot.alpha = 0.3
plot.plotType = .rolling
plot.shouldFill = true
plot.shouldCenterYAxis = false
plot.shouldMirror = true
plot.color = UIColor(named: uiFarbe)
audioInputPlot.addSubview(plot)
// Pin the AKNodeOutputPlot to the audioInputPlot
var constraints = [plot.leadingAnchor.constraint(equalTo: audioInputPlot.leadingAnchor)]
constraints.append(plot.trailingAnchor.constraint(equalTo: audioInputPlot.trailingAnchor))
constraints.append(plot.topAnchor.constraint(equalTo: audioInputPlot.topAnchor))
constraints.append(plot.bottomAnchor.constraint(equalTo: audioInputPlot.bottomAnchor))
constraints.forEach { $0.isActive = true }
}
@objc func updatePlotUI() {
if tracker.amplitude > 0.1 {
let trackerFrequency = Float(tracker.frequency)
guard trackerFrequency < 7_000 else {
// This is a bit of a hack because modern MacBooks give super high frequencies
return
}
var frequency = trackerFrequency
while frequency > Float(noteFrequencies[noteFrequencies.count - 1]) {
frequency /= 2.0
}
while frequency < Float(noteFrequencies[0]) {
frequency *= 2.0
}
var minDistance: Float = 10_000.0
var index = 0
for i in 0..<noteFrequencies.count {
let distance = fabsf(Float(noteFrequencies[i]) - frequency)
if distance < minDistance {
index = i
minDistance = distance
}
}
// let octave = Int(log2f(trackerFrequency / frequency))
frequencyLabel.text = String(format: "%0.1f", tracker.frequency)
if frequencyTranspose(note: notesToTanspose[index]) != droneLabel.text {
note = frequencyTranspose(note: notesToTanspose[index])
droneLabel.text = note
DispatchQueue.main.asyncAfter(deadline: .now() + 0.03, execute: {
self.prepareSinglePlayerFirstForStart(note: self.note)
self.startSinglePlayer()
})
}
}
}
func frequencyTranspose(note: String) -> String {
var indexNote = notesToTanspose.firstIndex(of: note)!
let chosenInstrument = UserDefaults.standard.object(forKey: "whichInstrument") as! String
if chosenInstrument == "Bb" {
if indexNote + 2 >= notesToTanspose.count {
indexNote -= 12
}
return notesToTanspose[indexNote + 2]
} else if chosenInstrument == "Eb" {
if indexNote - 3 < 0 {
indexNote += 12
}
return notesToTanspose[indexNote - 3]
} else {
return note
}
}
It's good practice to control the session settings yourself, so start by creating a method in your application to take care of that during initialisation.
Here's an example where I set a category and the desired options:
func start() {
do {
let session = AVAudioSession.sharedInstance()
try session.setCategory(.playAndRecord, options: .defaultToSpeaker)
try session.setActive(true, options: .notifyOthersOnDeactivation)
try session.overrideOutputAudioPort(AVAudioSession.PortOverride.speaker)
try AudioKit.start()
} catch {
// your error handler
}
}
You can call this start method where you currently call AudioKit.start() in initPlotTimer.
The example above uses AVAudioSession directly, which I believe is what AKSettings wraps (please feel free to edit my answer so it doesn't mislead future readers, as I'm not looking at the AudioKit source code at the moment).
Now that AVAudioSession has been shown, let's stick with the method offered by AudioKit, since that's what you're already dealing with.
Here's another example using AKSettings:
func start() {
do {
AKSettings.channelCount = 2
AKSettings.ioBufferDuration = 0.002
AKSettings.audioInputEnabled = true
AKSettings.bufferLength = .medium
AKSettings.defaultToSpeaker = true
// check docs for other options and settings
try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker, .allowBluetooth])
try AudioKit.start()
} catch {
// your handler
}
}
Keep in mind that you don't necessarily have to call it start, or run AudioKit's start method right there; I'm just spelling out the initialisation phase to make it readable for you and other use cases.
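For example (just a sketch, assuming the start() method above lives in the same class as your plot code), initPlotTimer from the question could then become:
func initPlotTimer() {
    AudioKit.output = silence
    start() // session category/options plus AudioKit.start() in one place
    setupPlot()
    plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}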
Reference:
https://developer.apple.com/documentation/avfoundation/avaudiosession/categoryoptions
https://audiokit.io/docs/Classes/AKSettings.html

Swift AVFoundation timing info for audio measurements

I am creating an application that will take an audio measurement by playing some stimulus data and recording the microphone input, and then analysing the data.
I am having trouble accounting for the time taken to initialise and start the audio engine, as this varies each time and also depends on the hardware used, etc.
So, I have an audio engine and have installed a tap on the hardware input, with input 1 being the microphone recording and input 2 being a reference input (also from the hardware). The output is physically Y-split and fed back into input 2.
The app initialises the engine, plays the stimulus audio plus 1 second of silence (to allow propagation time for the microphone to record the whole signal back), and then stops and closes the engine.
I write the two input buffers to a WAV file so that I can import it into an existing DAW and visually examine the signals. I can see that each time I take a measurement, the time difference between the two signals is different (despite the fact that the microphone has not moved and the hardware has stayed the same). I am assuming this is to do with the latency of the hardware, the time taken to initialise the engine, and the way the device distributes tasks.
I have tried capturing the absolute time (using mach_absolute_time) of the first buffer callback in each installTap block and subtracting the two, and I can see that this varies quite a lot with each call:
class newAVAudioEngine{
var engine = AVAudioEngine()
var audioBuffer = AVAudioPCMBuffer()
var running = true
var in1Buf:[Float]=Array(repeating:0, count:totalRecordSize)
var in2Buf:[Float]=Array(repeating:0, count:totalRecordSize)
var buf1current:Int = 0
var buf2current:Int = 0
var in1firstRun:Bool = false
var in2firstRun:Bool = false
var in1StartTime = 0
var in2startTime = 0
func measure(inputSweep:SweepFilter) -> measurement {
initializeEngine(inputSweep: inputSweep)
while running == true {
}
let measureResult = measurement.init(meas: meas,ref: ref)
return measureResult
}
func initializeEngine(inputSweep:SweepFilter) {
buf1current = 0
buf2current = 0
in1StartTime = 0
in2startTime = 0
in1firstRun = true
in2firstRun = true
in1Buf = Array(repeating:0, count:totalRecordSize)
in2Buf = Array(repeating:0, count:totalRecordSize)
engine.stop()
engine.reset()
engine = AVAudioEngine()
let srcNode = AVAudioSourceNode { _, _, frameCount, audioBufferList -> OSStatus in
let ablPointer = UnsafeMutableAudioBufferListPointer(audioBufferList)
if (Int(frameCount) + time) <= inputSweep.stimulus.count {
for frame in 0..<Int(frameCount) {
let value = inputSweep.stimulus[frame + time]
for buffer in ablPointer {
let buf: UnsafeMutableBufferPointer<Float> = UnsafeMutableBufferPointer(buffer)
buf[frame] = value
}
}
time += Int(frameCount)
return noErr
} else {
for frame in 0..<Int(frameCount) {
let value = 0
for buffer in ablPointer {
let buf: UnsafeMutableBufferPointer<Float> = UnsafeMutableBufferPointer(buffer)
buf[frame] = Float(value)
}
}
}
return noErr
}
let format = engine.outputNode.inputFormat(forBus: 0)
let stimulusFormat = AVAudioFormat(commonFormat: format.commonFormat,
sampleRate: Double(sampleRate),
channels: 1,
interleaved: format.isInterleaved)
do {
try AVAudioSession.sharedInstance().setCategory(.playAndRecord)
let ioBufferDuration = 128.0 / 44100.0
try AVAudioSession.sharedInstance().setPreferredIOBufferDuration(ioBufferDuration)
} catch {
assertionFailure("AVAudioSession setup failed")
}
let input = engine.inputNode
let inputFormat = input.inputFormat(forBus: 0)
print("InputNode Format is \(inputFormat)")
engine.attach(srcNode)
engine.connect(srcNode, to: engine.mainMixerNode, format: stimulusFormat)
if internalRefLoop == true {
srcNode.installTap(onBus: 0, bufferSize: 1024, format: stimulusFormat, block: {(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
if self.in2firstRun == true {
var info = mach_timebase_info()
mach_timebase_info(&info)
let currentTime = mach_absolute_time()
let nanos = currentTime * UInt64(info.numer) / UInt64(info.denom)
self.in2startTime = Int(nanos)
self.in2firstRun = false
}
do {
let floatData = buffer.floatChannelData?.pointee
for frame in 0..<buffer.frameLength{
if (self.buf2current + Int(frame)) < totalRecordSize{
self.in2Buf[self.buf2current + Int(frame)] = floatData![Int(frame)]
}
}
self.buf2current += Int(buffer.frameLength)
if (self.numberOfSamples + Int(buffer.frameLength)) <= totalRecordSize{
try self.stimulusFile.write(from: buffer)
self.numberOfSamples += Int(buffer.frameLength) } else {
self.engine.stop()
self.running = false
}
} catch {
print(NSString(string: "write failed"))
}
})
}
let micAudioConverter = AVAudioConverter(from: inputFormat, to: stimulusFormat!)
var micChannelMap:[NSNumber] = [0,-1]
micAudioConverter?.channelMap = micChannelMap
let refAudioConverter = AVAudioConverter(from: inputFormat, to: stimulusFormat!)
var refChannelMap:[NSNumber] = [1,-1]
refAudioConverter?.channelMap = refChannelMap
//Measurement Tap
engine.inputNode.installTap(onBus: 0, bufferSize: 1024, format: inputFormat, block: {(buffer2: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
//print(NSString(string:"writing"))
if self.in1firstRun == true {
var info = mach_timebase_info()
mach_timebase_info(&info)
let currentTime = mach_absolute_time()
let nanos = currentTime * UInt64(info.numer) / UInt64(info.denom)
self.in1StartTime = Int(nanos)
self.in1firstRun = false
}
do {
let micConvertedBuffer = AVAudioPCMBuffer(pcmFormat: stimulusFormat!, frameCapacity: buffer2.frameCapacity)
let micInputBlock: AVAudioConverterInputBlock = { inNumPackets, outStatus in
outStatus.pointee = AVAudioConverterInputStatus.haveData
return buffer2
}
var error: NSError? = nil
//let status = audioConverter.convert(to: convertedBuffer!, error: &error, withInputFrom: inputBlock)
let status = micAudioConverter?.convert(to: micConvertedBuffer!, error: &error, withInputFrom: micInputBlock)
//print(status)
let floatData = micConvertedBuffer?.floatChannelData?.pointee
for frame in 0..<micConvertedBuffer!.frameLength{
if (self.buf1current + Int(frame)) < totalRecordSize{
self.in1Buf[self.buf1current + Int(frame)] = floatData![Int(frame)]
}
if (self.buf1current + Int(frame)) >= totalRecordSize {
self.engine.stop()
self.running = false
}
}
self.buf1current += Int(micConvertedBuffer!.frameLength)
try self.measurementFile.write(from: micConvertedBuffer!)
} catch {
print(NSString(string: "write failed"))
}
if internalRefLoop == false {
if self.in2firstRun == true{
var info = mach_timebase_info()
mach_timebase_info(&info)
let currentTime = mach_absolute_time()
let nanos = currentTime * UInt64(info.numer) / UInt64(info.denom)
self.in2startTime = Int(nanos)
self.in2firstRun = false
}
do {
let refConvertedBuffer = AVAudioPCMBuffer(pcmFormat: stimulusFormat!, frameCapacity: buffer2.frameCapacity)
let refInputBlock: AVAudioConverterInputBlock = { inNumPackets, outStatus in
outStatus.pointee = AVAudioConverterInputStatus.haveData
return buffer2
}
var error: NSError? = nil
let status = refAudioConverter?.convert(to: refConvertedBuffer!, error: &error, withInputFrom: refInputBlock)
//print(status)
let floatData = refConvertedBuffer?.floatChannelData?.pointee
for frame in 0..<refConvertedBuffer!.frameLength{
if (self.buf2current + Int(frame)) < totalRecordSize{
self.in2Buf[self.buf2current + Int(frame)] = floatData![Int(frame)]
}
}
if (self.numberOfSamples + Int(buffer2.frameLength)) <= totalRecordSize{
self.buf2current += Int(refConvertedBuffer!.frameLength)
try self.stimulusFile.write(from: refConvertedBuffer!) } else {
self.engine.stop()
self.running = false
}
} catch {
print(NSString(string: "write failed"))
}
}
}
)
assert(engine.inputNode != nil)
running = true
try! engine.start()
}
}
The above method is my entire class. Currently each buffer callback from installTap writes the input directly to a WAV file, and this is where I can see the two end results differing each time. I have tried adding the start-time variables and subtracting the two, but the results still vary.
Do I need to take into account that my output will have latency too, which may vary with each call? If so, how do I add this time into the equation? What I am looking for is for the two inputs and the output to share a relative timeline, so that I can compare them. The differing hardware latency will not matter too much, as long as I can identify the end call times.
If you are doing real-time measurements, you might want to use AVAudioSinkNode instead of a tap. The sink node is new and was introduced alongside the AVAudioSourceNode you are already using. With a tap you won't be able to get precise timing.
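For what it's worth, here is a minimal sketch (not your measurement class, just an illustration) of attaching an AVAudioSinkNode to the input. Its receiver block is handed an AudioTimeStamp for every render cycle, and mHostTime is in mach absolute time units, so it can be compared with timestamps captured on the source/output side:
import AVFoundation

func startSinkCapture() throws -> AVAudioEngine {
    let engine = AVAudioEngine()
    let input = engine.inputNode
    let inputFormat = input.inputFormat(forBus: 0)
    // The sink's receiver block runs on the real-time audio thread and gets a
    // timestamp for each buffer, rather than receiving buffers after the fact.
    let sink = AVAudioSinkNode { timestamp, frameCount, audioBufferList -> OSStatus in
        let hostTime = timestamp.pointee.mHostTime // mach absolute time units
        let buffers = UnsafeMutableAudioBufferListPointer(
            UnsafeMutablePointer(mutating: audioBufferList))
        // ... copy `frameCount` samples out of `buffers` for analysis ...
        _ = (hostTime, buffers, frameCount)
        return noErr
    }
    engine.attach(sink)
    engine.connect(input, to: sink, format: inputFormat)
    try engine.start()
    return engine
}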

How to detect Apple Watch position using accelerometer and gravity in Swift?

I am creating an application for Apple Watch. The logic is: the user raises their hand and taps a button in the app, and at that moment I capture the accelerometer values. Whenever the user raises their hand into that captured position again, I have to send a message to the iPhone.
I am getting the values correctly, but the check is based purely on the raw accelerometer values. That means the accelerometer values can match even when the user has not raised their hand, and the message is sent to the phone anyway.
func startUpadateAccelerometer() {
self.motionManager.accelerometerUpdateInterval = 1.0 / 10.0
self.motionManager.startAccelerometerUpdates(to: OperationQueue()) { (accelerometerData, error) -> Void in
guard accelerometerData != nil else
{
print("There was an error: \(String(describing: error))")
return
}
DispatchQueue.main.async {
if(self.can_reset){
let differenceX : Bool = self.validateButtom(currentValue: accelerometerData!.acceleration.x, inititalValue: self.gravityReference.x)
let differenceY : Bool = self.validateButtom(currentValue: accelerometerData!.acceleration.y, inititalValue: self.gravityReference.y)
if(differenceX && differenceY && self.gravityOffsetDifference(currentValue: accelerometerData!.acceleration.x, referenceValue: self.gravityReference.x ) && self.gravityOffsetDifference(currentValue: accelerometerData!.acceleration.y, referenceValue: self.gravityReference.y)){
WKInterfaceDevice().play(.success)
// self.addLog(_logStr: EventsTypes.Achievements1.rawValue)
self.logString += String(format: "X: %0.3f Y: %0.3f Z: %0.3f \n", accelerometerData!.acceleration.x,accelerometerData!.acceleration.y,accelerometerData!.acceleration.z)
self.m_XYZValueLbl.setText(self.logString)
self.is_RechedZeroPos = true
self.session?.sendMessage(["msg" : "\(self.logString)"], replyHandler: nil) { (error) in
NSLog("%#", "Error sending message: \(error)")
}
} else {
if(self.checkAchievements2_3(deviceMotionData: accelerometerData!.acceleration) == true) {
if self.is_RechedZeroPos == true {
self.addLog(_logStr: EventsTypes.Achievements2.rawValue)
self.is_RechedZeroPos = false
} else {
self.addLog(_logStr: EventsTypes.Achievements3.rawValue)
}
}
}
} else {
self.gravityReference = accelerometerData!.acceleration
//self.logString = String(format: "Reference Acceleration %0.3f %0.3f %0.3f \n", self.gravityReference.x,self.gravityReference.y,self.gravityReference.z)
self.can_reset = true
}
}
}
}
func validateButtom(currentValue : Double , inititalValue : Double) -> Bool {
if( currentValue == 0 && inititalValue == 0) {
return true
} else if( currentValue < 0 && inititalValue < 0) {
return true
} else if( currentValue > 0 && inititalValue > 0) {
return true
} else {
return false
}
}
func gravityOffsetDifference(currentValue : Double , referenceValue: Double) -> Bool {
var difference : Double!
if (fabs(currentValue) <= fabs(referenceValue)) {
difference = fabs(referenceValue) - fabs(currentValue)
} else {
difference = fabs(currentValue) - fabs(referenceValue)
}
if (difference <= gravityOffset ) {
return true
} else {
return false
}
}
Please guide me on how to get the values only when the user actually reaches the captured position.
Accelerometers measure changes in velocity along the x, y, and z axes
Fetching accelerometer data
let motion = CMMotionManager()
func startAccelerometers() {
// Make sure the accelerometer hardware is available.
if self.motion.isAccelerometerAvailable {
self.motion.accelerometerUpdateInterval = 1.0 / 60.0 // 60 Hz
self.motion.startAccelerometerUpdates()
// Configure a timer to fetch the data.
self.timer = Timer(fire: Date(), interval: (1.0/60.0),
repeats: true, block: { (timer) in
// Get the accelerometer data.
if let data = self.motion.accelerometerData {
let x = data.acceleration.x
let y = data.acceleration.y
let z = data.acceleration.z
// Use the accelerometer data in your app.
}
})
// Add the timer to the current run loop.
RunLoop.current.add(self.timer!, forMode: .default)
}
}
Important
If your app relies on the presence of accelerometer hardware, configure the UIRequiredDeviceCapabilities key of its Info.plist file with the accelerometer value. For more information about the meaning of this key, see Information Property List Key Reference.
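Since raw accelerometer values mix gravity with hand movement, one option is to use the gravity vector from CMDeviceMotion to decide whether the wrist is actually in the raised position before comparing against your captured reference. This is only a sketch; the axis choice and thresholds below are hypothetical and would need tuning on an actual watch:
import CoreMotion

let motionManager = CMMotionManager()

func startWristPositionUpdates() {
    guard motionManager.isDeviceMotionAvailable else { return }
    motionManager.deviceMotionUpdateInterval = 1.0 / 10.0
    motionManager.startDeviceMotionUpdates(to: OperationQueue.main) { motion, error in
        guard let motion = motion else {
            print("Device motion error: \(String(describing: error))")
            return
        }
        // gravity reflects how the watch is oriented, independent of how
        // fast the hand happens to be moving at that moment.
        let gravity = motion.gravity
        // Hypothetical "wrist raised, screen facing the user" check;
        // tune the thresholds (and axes) on the device.
        let wristLikelyRaised = abs(gravity.y) < 0.3 && gravity.z < -0.7
        if wristLikelyRaised {
            // only now compare the accelerometer values against the captured
            // reference and send the message to the iPhone
        }
    }
}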
