Add multiple channel pipeline handlers in SwiftNIO, similar to Java Netty - iOS

I am exploring how to add multiple handlers in channel pipelines in SwiftNIO. In Java Netty, I have the following code:
@Component
public class NettyClientFilter extends ChannelInitializer<SocketChannel> {

    @Autowired
    private NettyClientHandler nettyClientHandler;

    @Override
    protected void initChannel(SocketChannel ch) throws Exception {
        ChannelPipeline ph = ch.pipeline();
        ph.addLast(new IdleStateHandler(20, 10, 0));
        ph.addLast(new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4));
        ph.addLast(new ProtobufDecoder(IMessage.getDefaultInstance()));
        ph.addLast(new LengthFieldPrepender(4));
        ph.addLast(new ProtobufEncoder());
        ph.addLast("nettyClientHandler", nettyClientHandler);
    }
}
In SwiftNIO there seem to be no classes similar to "LengthFieldBasedFrameDecoder", "ProtobufDecoder", "LengthFieldPrepender", and "ProtobufEncoder". How can I get those in SwiftNIO?

Right, let me go through all the handlers you add to your pipeline in Netty:
IdleStateHandler: available with import NIO from the swift-nio package
LengthFieldBasedFrameDecoder: right now in a PR but will be available shortly through import NIOExtras from the swift-nio-extras package
ProtobufDecoder, LengthFieldPrepender, ProtobufEncoder: all currently unavailable but straightforward to implement:
LengthFieldPrepender:
final class LengthFieldPrepender<IntType: FixedWidthInteger>: ChannelOutboundHandler {
    // we send and receive ByteBuffers
    typealias OutboundIn = ByteBuffer
    typealias OutboundOut = ByteBuffer

    private let endianness: Endianness
    private var buf: ByteBuffer?

    init(type: IntType.Type = IntType.self, endianness: Endianness = .big) {
        self.endianness = endianness
    }

    func handlerAdded(ctx: ChannelHandlerContext) {
        self.buf = ctx.channel.allocator.buffer(capacity: 8)
    }

    func write(ctx: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise<Void>?) {
        let incomingData = self.unwrapOutboundIn(data)
        // we cache `self.buf` so we might get lucky and save an allocation here
        // if the previous buffer has been fully written already
        self.buf!.clear()
        // write the length as the right type
        self.buf!.write(integer: IntType(incomingData.readableBytes), endianness: self.endianness)
        ctx.write(self.wrapOutboundOut(self.buf!), promise: nil)
        // write the actual data
        ctx.write(data, promise: promise)
    }
}
ProtobufDecoder:
import SwiftProtobuf
import NIOFoundationCompat // for ByteBuffer.readData
final class ProtobufDecoder<Msg: SwiftProtobuf.Message>: ChannelInboundHandler {
    typealias InboundIn = ByteBuffer
    typealias InboundOut = Msg

    func channelRead(ctx: ChannelHandlerContext, data: NIOAny) {
        var buffer = self.unwrapInboundIn(data)
        let data = buffer.readData(length: buffer.readableBytes)!
        do {
            // pretty straightforward here, just call the message type's initialiser
            let req = try Msg(serializedData: data)
            ctx.fireChannelRead(self.wrapInboundOut(req))
        } catch {
            ctx.fireErrorCaught(error)
        }
    }
}
ProtobufEncoder:
import NIOFoundationCompat
import SwiftProtobuf
final class ProtobufEncoder<Msg: SwiftProtobuf.Message>: ChannelOutboundHandler {
    typealias OutboundIn = Msg
    typealias OutboundOut = ByteBuffer

    private var buf: ByteBuffer?

    func handlerAdded(ctx: ChannelHandlerContext) {
        self.buf = ctx.channel.allocator.buffer(capacity: 4096)
    }

    func write(ctx: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise<Void>?) {
        let msg = self.unwrapOutboundIn(data)
        self.buf!.clear()
        do {
            // just use SwiftProtobuf's nice encoder
            self.buf!.write(bytes: try msg.serializedData())
            ctx.write(self.wrapOutboundOut(self.buf!), promise: promise)
        } catch {
            ctx.fireErrorCaught(error)
        }
    }
}
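For completeness, here is a rough sketch of how the pipeline from the Netty ChannelInitializer above could be wired up on the SwiftNIO side. This is only a sketch, assuming the handler implementations from this answer, the NIO 1-era pipeline.add(handler:) / then APIs (matching the ctx-style signatures used above; exact names vary slightly between SwiftNIO releases), a hypothetical IMessage SwiftProtobuf type, and a hypothetical MyClientHandler standing in for nettyClientHandler:
import NIO

let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
let bootstrap = ClientBootstrap(group: group)
    .channelInitializer { channel in
        // Mirrors the ph.addLast(...) calls from the Netty ChannelInitializer.
        // Once LengthFieldBasedFrameDecoder lands in swift-nio-extras, it would be
        // added right after the IdleStateHandler, exactly as in the Netty pipeline.
        return channel.pipeline.add(handler: IdleStateHandler(readTimeout: .seconds(20),
                                                              writeTimeout: .seconds(10))).then {
            channel.pipeline.add(handler: ProtobufDecoder<IMessage>())
        }.then {
            channel.pipeline.add(handler: LengthFieldPrepender<UInt32>())
        }.then {
            channel.pipeline.add(handler: ProtobufEncoder<IMessage>())
        }.then {
            // MyClientHandler is a placeholder for your equivalent of nettyClientHandler
            channel.pipeline.add(handler: MyClientHandler())
        }
    }

// host and port are placeholders
let channel = try bootstrap.connect(host: "example.com", port: 9999).wait()
As in Netty, outbound writes travel from the last handler back towards the head of the pipeline, so keeping the same relative order of ProtobufEncoder and LengthFieldPrepender preserves the framing.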

Related

How to translate NSOutputStream to InputStream?

I'm trying to write code for iOS to upload files via Ktor using streams, but I don't know how to connect iOS streams to Android streams or Kotlin channels.
I use the code below for Android.
private suspend fun uploadFiles(
    uriList: List<Uri>,
    contentResolver: ContentResolver,
    entity: String,
    objectId: String
): Map<String, Boolean>? {
    val inputStreamMap = mutableMapOf<String, InputStream>()
    uriList.forEach {
        val inputStream = contentResolver.openInputStream(it) ?: return@forEach
        val fileName = contentResolver.query(
            it, null, null, null, null
        )?.run {
            val displayNameIndex = getColumnIndex(OpenableColumns.DISPLAY_NAME)
            moveToFirst()
            val displayName = getString(displayNameIndex)
            close()
            displayName
        } ?: return@forEach
        inputStreamMap[fileName] = inputStream
    }
    val channels = inputStreamMap.mapValues {
        object : OutgoingContent.WriteChannelContent() {
            override suspend fun writeTo(channel: ByteWriteChannel) {
                it.value.copyTo(channel.toOutputStream(), 1024)
            }
            override val contentType = ContentType.Application.OctetStream
            override val contentLength: Long = it.value.available().toLong()
        }
    }
    val result = useCases.uploadFiles(channels, entity, objectId)
    inputStreamMap.values.forEach { it.close() }
    return result
}

How can I get native MediaStreamTrack from WebRtc MediaStreamTrackWeb object

I want to mix MediaStreamTrack objects in Dart using the package:universal_html/js.dart library.
JsAudioContext audioContext = JsAudioContext();
audioContext.initialize();
var senders = await call!.peerConnection!.getSenders();
for (var sender in senders) {
  for (var track in senderTracks) {
    if (sender.track!.id != track.id) {
      audioContext.connect(track);
    }
  }
}
But WebRTC hides the native jsTrack object inside the MediaStreamTrackWeb object.
How can I access this object? Does anyone have an idea?
I found a solution using the js_bindings library.
However, the library's MediaStream.getTracks() method throws a type error, so I worked around that with js_util interop.
JsAudioContext.dart:
import 'dart:convert';
import 'package:flutter_webrtc/flutter_webrtc.dart' as webrtc;
import 'package:dart_webrtc/src/media_stream_track_impl.dart' as track_impl;
import 'package:js_bindings/js_bindings.dart' as js_bindings;
import 'package:universal_html/html.dart' as html;
import 'dart:js_util' as js_util;

class JsAudioContext {
  js_bindings.AudioContext? audioContext;
  js_bindings.MediaStreamAudioDestinationNode? destinationNode;

  JsAudioContext() {
    audioContext = js_bindings.AudioContext();
  }

  void createMediaStreamDestination() {
    destinationNode = audioContext?.createMediaStreamDestination();
  }

  void connect(webrtc.MediaStreamTrack? trackWeb) {
    track_impl.MediaStreamTrackWeb mediaStreamTrackWeb =
        trackWeb as track_impl.MediaStreamTrackWeb;
    html.MediaStreamTrack htmlTrack = mediaStreamTrackWeb.jsTrack;
    var sourceStream = audioContext?.createMediaStreamSource(
        js_bindings.MediaStream([htmlTrack as js_bindings.MediaStreamTrack]));
    sourceStream?.connect(destinationNode!);
  }

  webrtc.MediaStreamTrack getMixedTrack() {
    List<dynamic> outputTrack =
        js_util.callMethod(destinationNode!.stream, 'getTracks', []);
    webrtc.MediaStreamTrack rtcTrack = track_impl.MediaStreamTrackWeb(
        outputTrack.toList()[0] as html.MediaStreamTrack);
    return rtcTrack;
  }
}
sip_call_event_service.dart:
@override
Future startConference(List<SipCallData> activeCallList) async {
  List<webrtc.MediaStreamTrack> receivedTracks = <webrtc.MediaStreamTrack>[];
  for (var item in activeCallList) {
    Call? call = sipuaHelper!.findCall(item.id!);
    var receives = await call!.peerConnection!.getReceivers();
    for (var element in receives) {
      receivedTracks.add(element.track!);
    }
  }

  JsAudioContext jsAudioContext = JsAudioContext();
  for (var item in activeCallList) {
    Call? call = sipuaHelper!.findCall(item.id!);
    jsAudioContext.createMediaStreamDestination();
    var receivers = await call!.peerConnection!.getReceivers();
    for (var receiver in receivers) {
      for (var track in receivedTracks) {
        if (receiver.track!.id != track.id) {
          jsAudioContext.connect(track);
        }
      }
    }
    var senders = await call.peerConnection!.getSenders();
    for (var sender in senders) {
      jsAudioContext.connect(sender.track);
    }
    await senders.first.replaceTrack(jsAudioContext.getMixedTrack());
  }
}

AWS Transcribe: Unable to load credentials from any of the providers in the chain AwsCredentialsProviderChain

I am running a Java program that uses AWS TranscribeStreaming. I created (from the AWS console) and downloaded AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY, then set the environment variables.
When I run the program I am getting error message:
Unable to load credentials from any of the providers in the chain AwsCredentialsProviderChain.
I don't know why, but this error also occurred while using the Google SDK for speech recognition.
Here is the code I am trying to run.
public class TranscribeStreamingDemoApp {
private static final Region REGION = Region.US_WEST_2;
private static TranscribeStreamingAsyncClient client;
public static void main(String args[]) throws URISyntaxException, ExecutionException, InterruptedException, LineUnavailableException {
client = TranscribeStreamingAsyncClient.builder()
.credentialsProvider(getCredentials())
.region(REGION)
.build();
CompletableFuture<Void> result = client.startStreamTranscription(getRequest(16_000),
new AudioStreamPublisher(getStreamFromMic()),
getResponseHandler());
result.get();
client.close();
}
private static InputStream getStreamFromMic() throws LineUnavailableException {
// Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
int sampleRate = 16000;
AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
if (!AudioSystem.isLineSupported(info)) {
System.out.println("Line not supported");
System.exit(0);
}
TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
line.open(format);
line.start();
InputStream audioStream = new AudioInputStream(line);
return audioStream;
}
private static AwsCredentialsProvider getCredentials() {
return DefaultCredentialsProvider.create();
}
private static StartStreamTranscriptionRequest getRequest(Integer mediaSampleRateHertz) {
return StartStreamTranscriptionRequest.builder()
.languageCode(LanguageCode.FR_FR.toString())
.mediaEncoding(MediaEncoding.PCM)
.mediaSampleRateHertz(mediaSampleRateHertz)
.build();
}
private static StartStreamTranscriptionResponseHandler getResponseHandler() {
return StartStreamTranscriptionResponseHandler.builder()
.onResponse(r -> {
System.out.println("Received Initial response");
})
.onError(e -> {
System.out.println(e.getMessage());
StringWriter sw = new StringWriter();
e.printStackTrace(new PrintWriter(sw));
System.out.println("Error Occurred: " + sw.toString());
})
.onComplete(() -> {
System.out.println("=== All records stream successfully ===");
})
.subscriber(event -> {
List<software.amazon.awssdk.services.transcribestreaming.model.Result> results = ((TranscriptEvent) event).transcript().results();
if (results.size() > 0) {
if (!results.get(0).alternatives().get(0).transcript().isEmpty()) {
System.out.println(results.get(0).alternatives().get(0).transcript());
}
}
})
.build();
}
private InputStream getStreamFromFile(String audioFileName) {
try {
File inputFile = new File(getClass().getClassLoader().getResource(audioFileName).getFile());
InputStream audioStream = new FileInputStream(inputFile);
return audioStream;
} catch (FileNotFoundException e) {
throw new RuntimeException(e);
}
}
private static class AudioStreamPublisher implements Publisher<AudioStream> {
private final InputStream inputStream;
private static Subscription currentSubscription;
private AudioStreamPublisher(InputStream inputStream) {
this.inputStream = inputStream;
}
@Override
public void subscribe(Subscriber<? super AudioStream> s) {
if (this.currentSubscription == null) {
this.currentSubscription = new SubscriptionImpl(s, inputStream);
} else {
this.currentSubscription.cancel();
this.currentSubscription = new SubscriptionImpl(s, inputStream);
}
s.onSubscribe(currentSubscription);
}
}
public static class SubscriptionImpl implements Subscription {
private static final int CHUNK_SIZE_IN_BYTES = 1024 * 1;
private final Subscriber<? super AudioStream> subscriber;
private final InputStream inputStream;
private ExecutorService executor = Executors.newFixedThreadPool(1);
private AtomicLong demand = new AtomicLong(0);
SubscriptionImpl(Subscriber<? super AudioStream> s, InputStream inputStream) {
this.subscriber = s;
this.inputStream = inputStream;
}
@Override
public void request(long n) {
if (n <= 0) {
subscriber.onError(new IllegalArgumentException("Demand must be positive"));
}
demand.getAndAdd(n);
executor.submit(() -> {
try {
do {
ByteBuffer audioBuffer = getNextEvent();
if (audioBuffer.remaining() > 0) {
AudioEvent audioEvent = audioEventFromBuffer(audioBuffer);
subscriber.onNext(audioEvent);
} else {
subscriber.onComplete();
break;
}
} while (demand.decrementAndGet() > 0);
} catch (Exception e) {
subscriber.onError(e);
}
});
}
@Override
public void cancel() {
executor.shutdown();
}
private ByteBuffer getNextEvent() {
ByteBuffer audioBuffer = null;
byte[] audioBytes = new byte[CHUNK_SIZE_IN_BYTES];
int len = 0;
try {
len = inputStream.read(audioBytes);
if (len <= 0) {
audioBuffer = ByteBuffer.allocate(0);
} else {
audioBuffer = ByteBuffer.wrap(audioBytes, 0, len);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return audioBuffer;
}
private AudioEvent audioEventFromBuffer(ByteBuffer bb) {
return AudioEvent.builder()
.audioChunk(SdkBytes.fromByteBuffer(bb))
.build();
}
}
}
Finally, I solved the problem. The documentation specifies that the AWS credentials provider chain looks for credentials in this order:
1. Java System Properties - aws.accessKeyId and aws.secretAccessKey
2. Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
3. Credential profiles file at the default location (~/.aws/credentials) shared by all AWS SDKs and the AWS CLI
Since setting up through environment variables didn't work, I opted to set credentials with Java system properties and it works!
Doc ref: https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.html
System.setProperty("aws.accessKeyId", "**************");
System.setProperty("aws.secretAccessKey", "**************");

Kotlin coroutines delay does not work on iOS queue dispatcher

I have a KMM app with the following code:
fun getWeather(callback: (WeatherInfo) -> Unit) {
    println("Start loading")
    GlobalScope.launch(ApplicationDispatcher) {
        while (true) {
            val response = httpClient.get<String>(API_URL) {
                url.parameters.apply {
                    set("q", "Moscow")
                    set("units", "metric")
                    set("appid", weatherApiKey())
                }
                println(url.build())
            }
            val result = Json {
                ignoreUnknownKeys = true
            }.decodeFromString<WeatherApiResponse>(response).main
            callback(result)
            // because ApplicationDispatcher on iOS does not support delay
            withContext(Dispatchers.Default) { delay(DELAY_TIME) }
        }
    }
}
If I replace withContext(Dispatchers.Default) { delay(DELAY_TIME) } with a plain delay(DELAY_TIME), execution never returns to the while loop, so it runs only one iteration.
ApplicationDispatcher for iOS looks like this:
internal actual val ApplicationDispatcher: CoroutineDispatcher = NsQueueDispatcher(dispatch_get_main_queue())

internal class NsQueueDispatcher(
    private val dispatchQueue: dispatch_queue_t
) : CoroutineDispatcher() {
    override fun dispatch(context: CoroutineContext, block: Runnable) {
        dispatch_async(dispatchQueue) {
            block.run()
        }
    }
}
From the delay source code I would guess that DefaultDelay should be returned, so the behaviour should be the same with or without withContext(Dispatchers.Default):
/** Returns [Delay] implementation of the given context */
internal val CoroutineContext.delay: Delay get() = get(ContinuationInterceptor) as? Delay ?: DefaultDelay
Thanks!
P.S. I got ApplicationDispatcher from ktor-samples.
ApplicationDispatcher is probably leftover from older samples; you don't need to use it anymore:
CoroutineScope(Dispatchers.Default).launch {
}
or
MainScope().launch {
}
And don't forget to use the -native-mt version of coroutines; more info in this issue.

ActionScript, NetStream.Play.Failed iOS AIR mobile

I'm trying to stream local audio files in m4a (aac), similar to Tiberiu-Ionuț Stan (http://stackoverflow.com/questions/2036107/aac-mp4-not-working-in-actionscript-3s-netstream):
package
{
    import flash.net.NetConnection;
    import flash.net.NetStream;
    import flash.events.NetStatusEvent;
    import flash.events.AsyncErrorEvent;
    import flash.events.Event;

    public class Mysound
    {
        private var _connection:NetConnection;
        private var _netStream:NetStream;
        private var _filePath:String;
        private var _client:Object;

        public function MainDocument(filePath:String):void
        {
            _filePath = filePath;
            connect();
        }

        private function connect():void
        {
            _connection = new NetConnection();
            _connection.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
            _connection.addEventListener(AsyncErrorEvent.ASYNC_ERROR, asyncErrorHandler);
            _connection.connect(null);
        }

        private function netStatusHandler(event:NetStatusEvent):void
        {
            switch (event.info.code)
            {
                case "NetConnection.Connect.Success":
                    requestAudio();
                    break;
            }
        }

        private function requestAudio():void
        {
            _netStream = new NetStream(_connection);
            _netStream.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
            _netStream.addEventListener(AsyncErrorEvent.ASYNC_ERROR, asyncErrorHandler);
            _client = new Object();
            _client.onMetaData = onMetaData;
            _netStream.client = _client;
            _netStream.backBufferTime = 0;
            _netStream.bufferTime = 0.5;
            _netStream.bufferTimeMax = 5;
            _netStream.play(_filePath);
        }

        private function asyncErrorHandler(event:AsyncErrorEvent):void
        {
            trace(event);
        }

        private function onMetaData(metadata:Object):void
        {
            var str:String = "";
            for (var key:String in metadata) {
                str += key + ": " + metadata[key];
            }
            trace(str);
        }
    }
}
It works for me in the emulator, but it doesn't on devices (iPads).
I found out that netStatusHandler on the device catches the status "NetStream.Play.Failed", but I have no idea why. I know it reads the file correctly, since it gets the correct metadata and also starts to buffer the sound, but it fails to play it. The files are in a folder next to my app's SWF, so it shouldn't be a sandbox problem. What else should I try to get it working?
