Controlling oscillator with sequencer in AudioKit v5

I'm trying to control an oscillator with a sequencer in AudioKit v5, and I've hit a snag. I'm subclassing MIDIInstrument, but I'm not sure that's the right approach. Please see the code below.
I'm getting this error on startup:
AVAEInternal.h:76 required condition is false:
[AVAudioEngine.mm:413:AttachNode: (node != nil)]
There's a previous post about this with an older AK version, which was somewhat helpful, but none of the links to examples in it work:
How do I control an oscillator's frequency with a sequencer
Can you point me in the right direction? Many thanks!
EDIT: Slight progress? I had misunderstood how AudioEngine works and had created two instances, so I removed one, which cleared the error. Adding track!.setMIDIOutput(instrument.midiIn) now logs the 4 notes, but there's still no sound. MIDIInstrument seems to accept a MIDIClientRef, but I see no reference to that in the sequencer class...
import AudioKit
import CAudioKit
class Test2 {
var instrument: OscMIDIInstrument
var sequencer: AppleSequencer
init() {
instrument = OscMIDIInstrument()
sequencer = AppleSequencer()
sequencer.setGlobalMIDIOutput(instrument.midiIn)
instrument.enableMIDI()
let track = sequencer.newTrack()
track!.setMIDIOutput(instrument.midiIn)
for i in 0 ..< 4 {
track!.add(noteNumber: 60, velocity: 64, position: Duration(seconds: Double(i)), duration: Duration(seconds: 0.5))
}
}
func testButton() {
if sequencer.isPlaying {
sequencer.stop()
} else {
sequencer.rewind()
sequencer.play()
}
}
}
class OscMIDIInstrument: MIDIInstrument {
var akEngine: AudioEngine
var osc: Oscillator
init() {
akEngine = AudioEngine()
osc = Oscillator()
super.init()
akEngine.output = osc
osc.amplitude = 0.1
osc.frequency = 440.0
do {
try akEngine.start()
} catch {
print("Couldn't start AudioEngine.")
}
}
override func receivedMIDINoteOn(noteNumber: MIDINoteNumber, velocity: MIDIVelocity, channel: MIDIChannel, portID: MIDIUniqueID? = nil, offset: MIDITimeStamp = 0) {
osc.play()
}
override func receivedMIDINoteOff(noteNumber: MIDINoteNumber, velocity: MIDIVelocity, channel: MIDIChannel, portID: MIDIUniqueID? = nil, offset: MIDITimeStamp = 0) {
osc.stop()
}
}

Got it working, somewhat. I found this example which pointed me to MIDICallbackInstrument:
https://github.com/AudioKit/Cookbook/blob/main/Cookbook/Cookbook/Recipes/CallbackInstrument.swift
The remaining issue is that you apparently can't handle sysex messages this way, I guess because the callback messages are limited to 3 bytes.
So I'm still looking for a better solution if anyone can help.
Thanks a lot!
class Test {
let akEngine = AudioEngine()
let sequencer = AppleSequencer()
let osc = Oscillator()
init() {
let callbackInstrument = MIDICallbackInstrument { [self] status, note, _ in
guard let midiStatus = MIDIStatusType.from(byte: status) else {
return
}
if midiStatus == .noteOn {
print("NoteOn \(note) at \(sequencer.currentPosition.seconds)")
osc.play()
} else if midiStatus == .noteOff {
print("NoteOff \(note) at \(sequencer.currentPosition.seconds)")
osc.stop()
}
}
let track = sequencer.newTrack()
for i in 0..<4 {
track!.add(noteNumber: 60, velocity: 64, position: Duration(seconds: Double(i)), duration: Duration(seconds: 0.25))
}
track?.setMIDIOutput(callbackInstrument.midiIn)
akEngine.output = osc
do {
try akEngine.start()
} catch {
print("Couldn't start AudioEngine.")
}
}
func play() {
if sequencer.isPlaying {
sequencer.stop()
} else {
sequencer.rewind()
sequencer.play()
}
}
}

Related

AudioKit output changes to ear speakers

I implemented the AudioKit "MICROPHONE ANALYSIS" example (https://audiokit.io/examples/MicrophoneAnalysis/) in my app.
I want to analyze the microphone input frequency and then play the correct note closest to the detected frequency.
Normally the sound output is the speaker or a Bluetooth device connected to my iPhone, but after implementing the "MICROPHONE ANALYSIS" example the output switched to the tiny speaker at the top of the iPhone that is normally used for phone calls.
How can I switch back to the "normal" speaker or to the connected Bluetooth device?
var mic: AKMicrophone!
var tracker: AKFrequencyTracker!
var silence: AKBooster!
func initFrequencyTracker() {
AKSettings.audioInputEnabled = true
mic = AKMicrophone()
tracker = AKFrequencyTracker(mic)
silence = AKBooster(tracker, gain: 0)
}
func deinitFrequencyTracker() {
plotTimer.invalidate()
do {
try AudioKit.stop()
AudioKit.output = nil
} catch {
print(error)
}
}
func initPlotTimer() {
AudioKit.output = silence
do {
try AudioKit.start()
} catch {
AKLog("AudioKit did not start!")
}
setupPlot()
plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}
func setupPlot() {
let plot = AKNodeOutputPlot(mic, frame: audioInputPlot.bounds)
plot.translatesAutoresizingMaskIntoConstraints = false
plot.alpha = 0.3
plot.plotType = .rolling
plot.shouldFill = true
plot.shouldCenterYAxis = false
plot.shouldMirror = true
plot.color = UIColor(named: uiFarbe)
audioInputPlot.addSubview(plot)
// Pin the AKNodeOutputPlot to the audioInputPlot
var constraints = [plot.leadingAnchor.constraint(equalTo: audioInputPlot.leadingAnchor)]
constraints.append(plot.trailingAnchor.constraint(equalTo: audioInputPlot.trailingAnchor))
constraints.append(plot.topAnchor.constraint(equalTo: audioInputPlot.topAnchor))
constraints.append(plot.bottomAnchor.constraint(equalTo: audioInputPlot.bottomAnchor))
constraints.forEach { $0.isActive = true }
}
@objc func updatePlotUI() {
if tracker.amplitude > 0.1 {
let trackerFrequency = Float(tracker.frequency)
guard trackerFrequency < 7_000 else {
// This is a bit of a hack because modern MacBooks give super high frequencies
return
}
var frequency = trackerFrequency
while frequency > Float(noteFrequencies[noteFrequencies.count - 1]) {
frequency /= 2.0
}
while frequency < Float(noteFrequencies[0]) {
frequency *= 2.0
}
var minDistance: Float = 10_000.0
var index = 0
for i in 0..<noteFrequencies.count {
let distance = fabsf(Float(noteFrequencies[i]) - frequency)
if distance < minDistance {
index = i
minDistance = distance
}
}
// let octave = Int(log2f(trackerFrequency / frequency))
frequencyLabel.text = String(format: "%0.1f", tracker.frequency)
if frequencyTranspose(note: notesToTanspose[index]) != droneLabel.text {
note = frequencyTranspose(note: notesToTanspose[index])
droneLabel.text = note
DispatchQueue.main.asyncAfter(deadline: .now() + 0.03, execute: {
self.prepareSinglePlayerFirstForStart(note: self.note)
self.startSinglePlayer()
})
}
}
}
func frequencyTranspose(note: String) -> String {
var indexNote = notesToTanspose.firstIndex(of: note)!
let chosenInstrument = UserDefaults.standard.object(forKey: "whichInstrument") as! String
if chosenInstrument == "Bb" {
if indexNote + 2 >= notesToTanspose.count {
indexNote -= 12
}
return notesToTanspose[indexNote + 2]
} else if chosenInstrument == "Eb" {
if indexNote - 3 < 0 {
indexNote += 12
}
return notesToTanspose[indexNote - 3]
} else {
return note
}
}
It's good practice to control the audio session settings yourself, so start by creating a method in your application that takes care of that during initialisation.
Here's an example where I set a category and the desired options:
func start() {
do {
let session = AVAudioSession.sharedInstance()
try session.setCategory(.playAndRecord, options: .defaultToSpeaker)
try session.setActive(true, options: .notifyOthersOnDeactivation)
try session.overrideOutputAudioPort(AVAudioSession.PortOverride.speaker)
try AudioKit.start()
} catch {
// your error handler
}
}
You can call this start method where you currently call AudioKit.start() in initPlotTimer.
The example above uses AVAudioSession directly, which I believe is what AKSettings wraps (please feel free to edit my answer if this misleads future readers, as I'm not looking at the AudioKit source code at the moment).
Since AKSettings is what you're already dealing with, let's stick with the method offered by AudioKit.
Here's another example using AKSettings:
func start() {
do {
AKSettings.channelCount = 2
AKSettings.ioBufferDuration = 0.002
AKSettings.audioInputEnabled = true
AKSettings.bufferLength = .medium
AKSettings.defaultToSpeaker = true
// check docs for other options and settings
try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker, .allowBluetooth])
try AudioKit.start()
} catch {
// your handler
}
}
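For instance, here is roughly what the call site in initPlotTimer from the question would look like once it uses a helper like the start() above (a minimal sketch; everything else in the method stays as posted):
func initPlotTimer() {
    AudioKit.output = silence
    start() // session setup + AudioKit.start(), instead of calling AudioKit.start() directly
    setupPlot()
    plotTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updatePlotUI), userInfo: nil, repeats: true)
}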
Keep in mind that you don't necessarily have to name the method start, or even run AudioKit's start method from inside it; I'm just laying out the initialisation phase to make it readable for your case and other use cases.
Reference:
https://developer.apple.com/documentation/avfoundation/avaudiosession/categoryoptions
https://audiokit.io/docs/Classes/AKSettings.html

How to make the AKSequencer switch soundfonts?

I'm building a feature with the AudioKit API where the user presses music notes on a screen and a sound plays based on the SoundFont they chose. I then let them collect a series of notes and play them back in the order they chose.
The problem is that I'm using an AKSequencer to play the notes back, and when it does, it never sounds like the SoundFont; it just makes a beep sound.
Is there code that lets me change what sound comes out of the AKSequencer?
I'm using AudioKit to do this.
Sample is an NSObject that contains the MIDI sampler, player, etc. Here's the code:
class Sampler1: NSObject {
var engine = AVAudioEngine()
var sampler: AVAudioUnitSampler!
var midisampler = AKMIDISampler()
var octave = 4
let midiChannel = 0
var midiVelocity = UInt8(127)
var audioGraph: AUGraph?
var musicPlayer: MusicPlayer?
var patch = UInt32(0)
var synthUnit: AudioUnit?
var synthNode = AUNode()
var outputNode = AUNode()
override init() {
super.init()
// engine = AVAudioEngine()
sampler = AVAudioUnitSampler()
engine.attach(sampler)
engine.connect(sampler, to: engine.mainMixerNode, format: nil)
loadSF2PresetIntoSampler(5)
/* sampler2 = AVAudioUnitSampler()
engine.attachNode(sampler2)
engine.connect(sampler2, to: engine.mainMixerNode, format: nil)
*/
addObservers()
startEngine()
setSessionPlayback()
/* CheckError(NewAUGraph(&audioGraph))
createOutputNode(audioGraph: audioGraph!, outputNode: &outputNode)
createSynthNode()
CheckError(AUGraphNodeInfo(audioGraph!, synthNode, nil, &synthUnit))
let synthOutputElement: AudioUnitElement = 0
let ioUnitInputElement: AudioUnitElement = 0
CheckError(AUGraphConnectNodeInput(audioGraph!, synthNode, synthOutputElement,
outputNode, ioUnitInputElement))
CheckError(AUGraphInitialize(audioGraph!))
CheckError(AUGraphStart(audioGraph!))
loadnewSoundFont()
loadPatch(patchNo: 0)*/
setUpSequencer()
}
func createOutputNode(audioGraph: AUGraph, outputNode: UnsafeMutablePointer<AUNode>) {
var cd = AudioComponentDescription(
componentType: OSType(kAudioUnitType_Output),
componentSubType: OSType(kAudioUnitSubType_RemoteIO),
componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
componentFlags: 0,componentFlagsMask: 0)
CheckError(AUGraphAddNode(audioGraph, &cd, outputNode))
}
func loadSF2PresetIntoSampler(_ preset: UInt8) {
guard let bankURL = Bundle.main.url(forResource: "Arachno SoundFont - Version 1.0", withExtension: "sf2") else {
print("could not load sound font")
return
}
let folder = bankURL.path
do {
try self.sampler.loadSoundBankInstrument(at: bankURL,
program: preset,
bankMSB: UInt8(kAUSampler_DefaultMelodicBankMSB),
bankLSB: UInt8(kAUSampler_DefaultBankLSB))
try midisampler.loadSoundFont(folder, preset: 0, bank: kAUSampler_DefaultBankLSB)
// try midisampler.loadPath(bankURL.absoluteString)
} catch {
print("error loading sound bank instrument")
}
}
func createSynthNode() {
var cd = AudioComponentDescription(
componentType: OSType(kAudioUnitType_MusicDevice),
componentSubType: OSType(kAudioUnitSubType_MIDISynth),
componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
componentFlags: 0,componentFlagsMask: 0)
CheckError(AUGraphAddNode(audioGraph!, &cd, &synthNode))
}
func setSessionPlayback() {
let audioSession = AVAudioSession.sharedInstance()
do {
try
audioSession.setCategory(AVAudioSession.Category.playback, options:
AVAudioSession.CategoryOptions.mixWithOthers)
} catch {
print("couldn't set category \(error)")
return
}
do {
try audioSession.setActive(true)
} catch {
print("couldn't set category active \(error)")
return
}
}
func startEngine() {
if engine.isRunning {
print("audio engine already started")
return
}
do {
try engine.start()
print("audio engine started")
} catch {
print("oops \(error)")
print("could not start audio engine")
}
}
func addObservers() {
NotificationCenter.default.addObserver(self,
selector:"engineConfigurationChange:",
name:NSNotification.Name.AVAudioEngineConfigurationChange,
object:engine)
NotificationCenter.default.addObserver(self,
selector:"sessionInterrupted:",
name:AVAudioSession.interruptionNotification,
object:engine)
NotificationCenter.default.addObserver(self,
selector:"sessionRouteChange:",
name:AVAudioSession.routeChangeNotification,
object:engine)
}
func removeObservers() {
NotificationCenter.default.removeObserver(self,
name: NSNotification.Name.AVAudioEngineConfigurationChange,
object: nil)
NotificationCenter.default.removeObserver(self,
name: AVAudioSession.interruptionNotification,
object: nil)
NotificationCenter.default.removeObserver(self,
name: AVAudioSession.routeChangeNotification,
object: nil)
}
private func setUpSequencer() {
// set the sequencer voice to storedPatch so we can play along with it using patch
var status = NewMusicSequence(&musicSequence)
if status != noErr {
print("\(#line) bad status \(status) creating sequence")
}
status = MusicSequenceNewTrack(musicSequence!, &track)
if status != noErr {
print("error creating track \(status)")
}
// 0xB0 = bank select, first we do the most significant byte
var chanmess = MIDIChannelMessage(status: 0xB0 | sequencerMidiChannel, data1: 0, data2: 0, reserved: 0)
status = MusicTrackNewMIDIChannelEvent(track!, 0, &chanmess)
if status != noErr {
print("creating bank select event \(status)")
}
// then the least significant byte
chanmess = MIDIChannelMessage(status: 0xB0 | sequencerMidiChannel, data1: 32, data2: 0, reserved: 0)
status = MusicTrackNewMIDIChannelEvent(track!, 0, &chanmess)
if status != noErr {
print("creating bank select event \(status)")
}
// set the voice
chanmess = MIDIChannelMessage(status: 0xC0 | sequencerMidiChannel, data1: UInt8(0), data2: 0, reserved: 0)
status = MusicTrackNewMIDIChannelEvent(track!, 0, &chanmess)
if status != noErr {
print("creating program change event \(status)")
}
CheckError(MusicSequenceSetAUGraph(musicSequence!, audioGraph))
CheckError(NewMusicPlayer(&musicPlayer))
CheckError(MusicPlayerSetSequence(musicPlayer!, musicSequence))
CheckError(MusicPlayerPreroll(musicPlayer!))
}
func loadnewSoundFont() {
var bankURL = Bundle.main.url(forResource: "Arachno SoundFont - Version 1.0", withExtension: "sf2")
CheckError(AudioUnitSetProperty(synthUnit!, AudioUnitPropertyID(kMusicDeviceProperty_SoundBankURL), AudioUnitScope(kAudioUnitScope_Global), 0, &bankURL, UInt32(MemoryLayout<URL>.size)))
}
func loadPatch(patchNo: Int) {
let channel = UInt32(0)
var enabled = UInt32(1)
var disabled = UInt32(0)
patch = UInt32(patchNo)
CheckError(AudioUnitSetProperty(
synthUnit!,
AudioUnitPropertyID(kAUMIDISynthProperty_EnablePreload),
AudioUnitScope(kAudioUnitScope_Global),
0,
&enabled,
UInt32(MemoryLayout<UInt32>.size)))
let programChangeCommand = UInt32(0xC0 | channel)
CheckError(MusicDeviceMIDIEvent(self.synthUnit!, programChangeCommand, patch, 0, 0))
CheckError(AudioUnitSetProperty(
synthUnit!,
AudioUnitPropertyID(kAUMIDISynthProperty_EnablePreload),
AudioUnitScope(kAudioUnitScope_Global),
0,
&disabled,
UInt32(MemoryLayout<UInt32>.size)))
// the previous programChangeCommand just triggered a preload
// this one actually changes to the new voice
CheckError(MusicDeviceMIDIEvent(synthUnit!, programChangeCommand, patch, 0, 0))
}
func play(number: UInt8) {
sampler.startNote(number, withVelocity: 127, onChannel: 0)
}
func stop(number: UInt8) {
sampler.stopNote(number, onChannel: 0)
}
func musicPlayerPlay() {
var status = noErr
var playing:DarwinBoolean = false
CheckError(MusicPlayerIsPlaying(musicPlayer!, &playing))
if playing != false {
status = MusicPlayerStop(musicPlayer!)
if status != noErr {
print("Error stopping \(status)")
CheckError(status)
return
}
}
CheckError(MusicPlayerSetTime(musicPlayer!, 0))
CheckError(MusicPlayerStart(musicPlayer!))
}
var avsequencer: AVAudioSequencer!
var sequencerMode = 1
var sequenceStartTime: Date?
var noteOnTimes = [Date] (repeating: Date(), count:128)
var musicSequence: MusicSequence?
var midisequencer = AKSequencer()
// var musicPlayer: MusicPlayer?
let sequencerMidiChannel = UInt8(1)
var midisynthUnit: AudioUnit?
//track is the variable the notes are written on
var track: MusicTrack?
var newtrack: AKMusicTrack?
func setupSequencer(name: String) {
self.avsequencer = AVAudioSequencer(audioEngine: self.engine)
let options = AVMusicSequenceLoadOptions.smfChannelsToTracks
if let fileURL = Bundle.main.url(forResource: name, withExtension: "mid") {
do {
try avsequencer.load(from: fileURL, options: options)
print("loaded \(fileURL)")
} catch {
print("something screwed up \(error)")
return
}
}
avsequencer.prepareToPlay()
}
func playsequence() {
if avsequencer.isPlaying {
stopsequence()
}
avsequencer.currentPositionInBeats = TimeInterval(0)
do {
try avsequencer.start()
} catch {
print("cannot start \(error)")
}
}
func creatnewtrck(){
let sequencelegnth = AKDuration(beats: 8.0)
newtrack = midisequencer.newTrack()
}
func addnotestotrack(){
// AKMIDISampler
}
func stopsequence() {
avsequencer.stop()
}
func setSequencerMode(mode: Int) {
sequencerMode = mode
switch(sequencerMode) {
case SequencerMode.off.rawValue:
print(mode)
// CheckError(osstatus: MusicPlayerStop(musicPlayer!))
case SequencerMode.recording.rawValue:
print(mode)
case SequencerMode.playing.rawValue:
print(mode)
default:
break
}
}
/* func noteOn(note: UInt8) {
let noteCommand = UInt32(0x90 | midiChannel)
let base = note - 48
let octaveAdjust = (UInt8(octave) * 12) + base
let pitch = UInt32(octaveAdjust)
CheckError(MusicDeviceMIDIEvent(self.midisynthUnit!,
noteCommand, pitch, UInt32(self.midiVelocity), 0))
}
func noteOff(note: UInt8) {
let channel = UInt32(0)
let noteCommand = UInt32(0x80 | channel)
let base = note - 48
let octaveAdjust = (UInt8(octave) * 12) + base
let pitch = UInt32(octaveAdjust)
CheckError(MusicDeviceMIDIEvent(self.midisynthUnit!,
noteCommand, pitch, 0, 0))
}*/
func noteOn(note: UInt8) {
if sequencerMode == SequencerMode.recording.rawValue {
print("recording sequence note")
noteOnTimes[Int(note)] = Date()
} else {
print("no notes")
}
}
func noteOff(note: UInt8, timestamp: Float64, sequencetime: Date) {
if sequencerMode == SequencerMode.recording.rawValue {
let duration: Double = Date().timeIntervalSince(noteOnTimes[Int(note)])
let onset: Double = noteOnTimes[Int(note)].timeIntervalSince(sequencetime)
//the order of the notes in the array
var beat: MusicTimeStamp = 0
CheckError(MusicSequenceGetBeatsForSeconds(musicSequence!, onset, &beat))
var mess = MIDINoteMessage(channel: sequencerMidiChannel,
note: note,
velocity: midiVelocity,
releaseVelocity: 0,
duration: Float(duration) )
CheckError(MusicTrackNewMIDINoteEvent(track!, timestamp, &mess))
}
}
}
Here's the code that plays the collection of notes:
_ = sample.midisequencer.newTrack()
let sequencelegnth = AKDuration(beats: 8.0)
sample.midisequencer.setLength(sequencelegnth)
sample.sequenceStartTime = format.date(from: format.string(from: NSDate() as Date))
sample.midisequencer.setTempo(160.0)
sample.midisequencer.enableLooping()
sample.midisequencer.play()
This is the code that changes the SoundFont:
func loadSF2PresetIntoSampler(_ preset: UInt8) {
guard let bankURL = Bundle.main.url(forResource: "Arachno SoundFont - Version 1.0", withExtension: "sf2") else {
print("could not load sound font")
return
}
let folder = bankURL.path
do {
try self.sampler.loadSoundBankInstrument(at: bankURL,
program: preset,
bankMSB: UInt8(kAUSampler_DefaultMelodicBankMSB),
bankLSB: UInt8(kAUSampler_DefaultBankLSB))
try midisampler.loadSoundFont(folder, preset: 0, bank: kAUSampler_DefaultBankLSB)
// try midisampler.loadPath(bankURL.absoluteString)
} catch {
print("error loading sound bank instrument")
}
}
The midisampler is an AKMIDISampler.
At minimum, you need to connect an AKSequencer to some kind of output to get it to make sounds. With the older version (now called AKAppleSequencer), if you don't explicitly set the output, you will hear the default (beepy) sampler.
For example, with AKAppleSequencer (in AudioKit 4.8; AKSequencer in earlier versions):
let track = seq.newTrack()
track!.setMIDIOutput(sampler.midiIn)
and with the new AKSequencer:
let track = seq.newTrack() // for the new AKSequencer, in AudioKit 4.8
track!.setTarget(node: sampler)
Also, make sure that you have enabled the audio background mode in your project's Capabilities, as missing this step will also get you the default sampler.
You've included a massive amount of code (and I haven't tried to absorb all of it), but the fact that you are using instances of both MusicSequence and AKSequencer (which I suspect is the older version, now called AKAppleSequencer, and which is merely a wrapper around MusicSequence) is something of a red flag.
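Putting those pieces together, here is a rough sketch of the AudioKit 4.x wiring, with AKAppleSequencer driving an AKMIDISampler instead of the default sampler (names are illustrative, and the SoundFont-loading call mirrors the one in the question):
import AudioKit

let sampler = AKMIDISampler()
let sequencer = AKAppleSequencer() // called AKSequencer before AudioKit 4.8

// Load the SoundFont into the sampler, as in the question's loadSF2PresetIntoSampler.
if let bankURL = Bundle.main.url(forResource: "Arachno SoundFont - Version 1.0",
                                 withExtension: "sf2") {
    try? sampler.loadSoundFont(bankURL.path, preset: 0, bank: 0) // 0 = default bank LSB
}

AudioKit.output = sampler
try? AudioKit.start()

// This is the step that replaces the default (beepy) sampler:
let track = sequencer.newTrack()
track?.setMIDIOutput(sampler.midiIn)
track?.add(noteNumber: 60, velocity: 100,
           position: AKDuration(beats: 0), duration: AKDuration(beats: 1))
sequencer.setLength(AKDuration(beats: 8))
sequencer.play()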

Swift - AudioKit AKMidi to AKSequencer

I currently have an application which uses an AKKeyboard to create sounds with an Oscillator. Whenever the keyboard is played I also get the MIDI data. What I would like to do is create a sequence in AKSequencer from the MIDI data I receive.
Any advice or pointers will be greatly appreciated, thank you.
Here is a partial amount of my code:
var bank = AKOscillatorBank()
var midi: AKMIDI!
let sequencer = AKSequencer()
let sequenceLength = AKDuration(beats: 8.0)
func configureBank() {
AudioKit.output = bank
do {
try AudioKit.start()
} catch {
AKLog("AudioKit couldn't be started")
}
midi = AudioKit.midi
midi.addListener(self)
midi.openInput()
}
// AKKeyboard Protocol methods
func noteOn(note: MIDINoteNumber) {
let event = AKMIDIEvent(noteOn: note, velocity: 80, channel: 5)
midi.sendEvent(event)
bank.play(noteNumber: note, velocity: 100)
}
func noteOff(note: MIDINoteNumber) {
let event = AKMIDIEvent(noteOff: note, velocity: 0, channel: 5)
midi.sendEvent(event)
bank.stop(noteNumber: note)
}
// AKMIDIListener Protocol methods..
func receivedMIDINoteOff(noteNumber: MIDINoteNumber, velocity: MIDIVelocity, channel: MIDIChannel) {
print("ReceivedMIDINoteOff: \(noteNumber), velocity: \(velocity), channel: \(channel)")
}
You don't actually need to build the sequence directly from the AKMIDIEvents. Just query the sequence's currentPosition when you call AKKeyboardView's noteOn and noteOff methods and programmatically add events to a sequencer track based on this.
The process is basically identical to this (minus the final step, of course): https://stackoverflow.com/a/50071028/2717159
Edit - To get the noteOn and noteOff times, and duration:
// store notes and their start times in a dictionary
// (MusicTimeStamp rather than MIDITimeStamp, since currentPosition.beats is a Double)
var noteDict = [MIDINoteNumber: MusicTimeStamp]()
// when you get a noteOn, note the time
noteDict[currentMIDINote] = seq.currentPosition.beats
// when you get a noteOff
let endTime = seq.currentPosition.beats
if let startTime = noteDict[currentMIDINote] {
let durationInBeats = endTime - startTime
// use the startTime, duration and currentMIDINote to add event to track
noteDict[currentMIDINote] = nil
}
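To fill in that last comment, the event can then be written to an old-style AKSequencer / AKAppleSequencer track roughly like this (a sketch; `track` is assumed to be the AKMusicTrack returned by seq.newTrack(), and the velocity is a placeholder):
// use the startTime, duration and currentMIDINote to add the event to the track
track.add(noteNumber: currentMIDINote,
          velocity: 80, // or the velocity captured in the keyboard's noteOn
          position: AKDuration(beats: startTime),
          duration: AKDuration(beats: durationInBeats))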

Clicks / Distortion in AudioKit

When I have a bunch (20-40) of samples playing and overlapping each other simultaneously, the sound sometimes starts getting distorted, and then some waving, oscillating, and clicking begins to happen. A similar sound happens when the app crashes while samples are playing - an abrupt, crunchy halt.
Notice the waviness begins between 0:05 and 0:10; nasty clicks start around 0:15.
Listen Here
How can I make it smoother? I am spawning AKPlayer objects (from 4.1) that play 4-8 second .wav files. Those go into AKBoosters which go into AKMixers which go into the final AKMixer for output.
Edit:
Many PenAudioNodes get plugged into the mixer of the AudioReceiver singleton.
Here's my AudioReceiver singleton:
class AudioReceiver {
static var sharedInstance = AudioReceiver()
private var audioNodes = [UUID : AudioNode]()
private let mixer = AKMixer()
private let queue = DispatchQueue(label: "audio-queue")
//MARK: - Setup & Teardown
init() {
AudioKit.output = mixer //peakLimiter
AudioKit.start()
}
//MARK: - Public
func audioNodeBegan(_ message : AudioNodeMessage) {
queue.async {
var audioNode: AudioNode?
switch message.senderType {
case .pen:
audioNode = PenAudioNode()
case .home:
audioNode = LoopingAudioNode(with: AudioHelper.homeLoopFile())
default:
break
}
if let audioNode = audioNode {
self.audioNodes[message.senderId] = audioNode
self.mixer.connect(input: audioNode.output)
audioNode.start(message)
}
}
}
func audioNodeMoved(_ message : AudioNodeMessage) {
queue.async {
if let audioNode = self.audioNodes[message.senderId] {
audioNode.update(message)
}
}
}
func audioNodeEnded(_ message : AudioNodeMessage) {
queue.async {
if let audioNode = self.audioNodes[message.senderId] {
audioNode.stop(message)
}
self.audioNodes[message.senderId] = nil
}
}
}
Here's my PenAudioNode:
class PenAudioNode {
fileprivate var mixer: AKMixer?
fileprivate var playersBoosters = [AKPlayer : AKBooster]()
fileprivate var finalOutput: AKNode?
fileprivate let file: AKAudioFile = AudioHelper.randomBellSampleFile()
//MARK: - Setup & Teardown
init() {
mixer = AKMixer()
finalOutput = mixer!
}
}
extension PenAudioNode: AudioNode {
var output: AKNode {
return finalOutput!
}
func start(_ message: AudioNodeMessage) {
}
func update(_ message: AudioNodeMessage) {
if let velocity = message.velocity {
let newVolume = Swift.min((velocity / 50) + 0.1, 1)
mixer!.volume = newVolume
}
if let isClimactic = message.isClimactic, isClimactic {
let player = AKPlayer(audioFile: file)
player.completionHandler = { [weak self] in
self?.playerCompleted(player)
}
let booster = AKBooster(player)
playersBoosters[player] = booster
booster.rampTime = 1
booster.gain = 0
mixer!.connect(input: booster)
player.play()
booster.gain = 1
}
}
func stop(_ message: AudioNodeMessage) {
for (_, booster) in playersBoosters {
booster.gain = 0
}
DispatchQueue.global().asyncAfter(deadline: DispatchTime.now() + 1) {
self.mixer!.stop()
self.output.disconnectOutput()
}
}
private func playerCompleted(_ player: AKPlayer) {
playersBoosters.removeValue(forKey: player)
}
}
This sounds like you are not releasing objects and you are eventually overloading the audio engine with too many instances of processing nodes connected in the graph. In particular not releasing AKBoosters will cause an issue like this. I can't really tell what your code is doing, but if you are spawning objects without releasing them properly, it will lead to garbled audio.
You want to conserve objects as much as possible and make sure you are using the absolute minimum amount of AKNode based processing.
There are various ways to debug this, but you can start by printing out the current state of the AVAudioEngine:
AudioKit.engine.description
That will show how many nodes you have connected in the graph at any given moment.
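For example, in the PenAudioNode above, one way to keep the graph from growing without bound is to tear the finished player and its booster out of the graph in the completion handler. Here's a rough sketch using the same disconnectOutput() call the stop path already uses (depending on your AudioKit version you may also want to detach the underlying AVAudioNodes from the engine):
private func playerCompleted(_ player: AKPlayer) {
    // Remove the finished player/booster pair from the graph so nodes don't accumulate.
    if let booster = playersBoosters[player] {
        booster.disconnectOutput()
    }
    player.disconnectOutput()
    playersBoosters.removeValue(forKey: player)
}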

How do I use the Swift sampler to play a tone then pause before playing the next?

I have code to take a sequence of letters in a string and interpret them as notes. The code will then play the notes. The problem is that they all play at the same time. How do I play them each as a quarter note, essentially to play a note, wait for it to end, and then play the next note?
@IBAction func playButton(sender: AnyObject) {
fractalEngine.output = "adgadefe"
var notes = Array(fractalEngine.output.characters)
var counter = 0
while counter < notes.count {
var note = notes[counter]
if note == "a" {
play(58)
}
else if note == "b" {
play(59)
}
else if note == "c" {
play(60)
}
else if note == "d" {
play(61)
}
else if note == "e" {
play(62)
}
else if note == "f" {
play(63)
}
else {
play(64)
}
counter += 1
}
//self.sampler.startNote(60, withVelocity: 64, onChannel: 0)
}
func play(note: UInt8) {
sampler.startNote(note, withVelocity: 64, onChannel: 0)
}
func stop(note: UInt8) {
sampler.stopNote(note, onChannel: 0)
}
Here's the code that initializes the sampler:
func initAudio(){
engine = AVAudioEngine()
self.sampler = AVAudioUnitSampler()
engine.attachNode(self.sampler)
engine.connect(self.sampler, to: engine.outputNode, format: nil)
guard let soundbank = NSBundle.mainBundle().URLForResource("gs_instruments", withExtension: "dls") else {
print("Could not initalize soundbank.")
return
}
let melodicBank:UInt8 = UInt8(kAUSampler_DefaultMelodicBankMSB)
let gmHarpsichord:UInt8 = 6
do {
try engine.start()
try self.sampler.loadSoundBankInstrumentAtURL(soundbank, program: gmHarpsichord, bankMSB: melodicBank, bankLSB: 0)
}catch {
print("An error occurred \(error)")
return
}
/*
self.musicSequence = createMusicSequence()
createAVMIDIPlayer(self.musicSequence)
createAVMIDIPlayerFromMIDIFIle()
self.musicPlayer = createMusicPlayer(musicSequence)
*/
}
It looks like you need to delay playing each note in turn. Here's one implementation (that avoids blocking the main thread):
//global delay helper function
func delay(delay:Double, closure:()->()) {
dispatch_after(
dispatch_time(
DISPATCH_TIME_NOW,
Int64(delay * Double(NSEC_PER_SEC))
),
dispatch_get_main_queue(), closure)
}
//inside your playButton (here `notes` holds the MIDI note numbers for the letters)
let delayConstant = 0.05 //customize as needed
for (index, note) in notes.enumerate() {
    delay(delayConstant * Double(index)) {
        self.play(note)
        //handle stopping
        delay(delayConstant) { self.stop(note) }
    }
}
What this does is play each note after an escalating delay, then stop it after the note length, which is assumed here to be the delay constant.
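If you want each letter to last a true quarter note rather than the small spacing constant, a variation of the same idea would be the following (still using the pre-Swift-3 GCD style of the snippet above; noteLength and noteMap are illustrative names, and the mapping mirrors the if/else chain from the question):
let noteLength = 0.5 // seconds per quarter note at 120 BPM
let noteMap: [Character: UInt8] = ["a": 58, "b": 59, "c": 60,
                                   "d": 61, "e": 62, "f": 63]
for (index, letter) in notes.enumerate() {
    let midiNote = noteMap[letter] ?? 64 // anything else falls through to 64, as in the question
    delay(noteLength * Double(index)) {
        self.play(midiNote)
        delay(noteLength) { self.stop(midiNote) }
    }
}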
