Problems with gen_tcp:accept - Erlang

I've made a TCP server which spawns a process to listen for incoming connections. Here is the sample code (I removed a few things from my original code):
module a:

main([]) ->
    {ok, Pid} = b:start(),
    receive
        _ ->
            ok
    end.

module b:

-define(TCP_OPTIONS, [binary, {active, false}, {packet, 0}, {reuseaddr, true}]).
...

start_link(Port) ->
    Pid = spawn_link(server_listener, init, [Port]),
    {ok, self()}.

init(Port, Processor) ->
    case gen_tcp:listen(Port, ?TCP_OPTIONS) of
        {ok, LSocket} ->
            accept_loop(LSocket);
        {error, Reason} ->
            {stop, Reason}
    end.

accept_loop(LSocket) ->
    ?LOG("Current socket acceptor PID [~w]~n", [self()]),
    case gen_tcp:accept(LSocket) of
        {ok, Socket} ->
            %% do stuff here
            spawn(server_listener, accept_loop, [LSocket]);
        {error, Reason} ->
            ?LOG("Error accepting socket! [ ~s ]~n", [Reason])
    end.
The problem is: EVERY time I try to connect with telnet on this port, gen_tcp:accept returns { error, closed }. This is driving me nuts trying to figure out what is happening.
Thanks,

Your "accept loop" isn't really a loop... and it is contrived.
You probably want "do_accept_loop" and a proper "server_loop" for handling a connection. Have a look at this.
You want something along the lines of:
% Call echo:listen(Port) to start the service.
listen(Port) ->
    {ok, LSocket} = gen_tcp:listen(Port, ?TCP_OPTIONS),
    accept(LSocket).

% Wait for incoming connections and spawn the echo loop when we get one.
accept(LSocket) ->
    {ok, Socket} = gen_tcp:accept(LSocket),
    spawn(fun() -> loop(Socket) end),
    accept(LSocket).

% Echo back whatever data we receive on Socket.
loop(Socket) ->
    case gen_tcp:recv(Socket, 0) of
        {ok, Data} ->
            gen_tcp:send(Socket, Data),
            loop(Socket);
        {error, closed} ->
            ok
    end.
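If you want to compile and try that snippet directly, it only needs a module header and the ?TCP_OPTIONS macro around it. A minimal sketch (the module name echo, the start/1 helper and the exact option list are assumptions, not from the original answer):

%% Sketch of a wrapper module; put the listen/accept/loop functions above
%% below this header. Module name, exports and options are assumptions.
-module(echo).
-export([start/1, listen/1]).

-define(TCP_OPTIONS, [binary, {active, false}, {packet, 0}, {reuseaddr, true}]).

%% Convenience starter so the blocking accept loop runs in its own process.
start(Port) ->
    {ok, spawn(?MODULE, listen, [Port])}.

You can then connect with telnet on the chosen port and see every line echoed back.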

Related

What is the use case of Flux.repeat() without any parameter?

I was looking at the following code and was confused about what the repeat() operator is doing here.
return inboundFlux
    .groupBy(record -> record.receiverOffset().topicPartition())
    .flatMap(partitionFlux -> partitionFlux
        .concatMap(el -> Flux.just(el)
            .doOnNext(receiverRecord -> {
                log.info("Starting to process {}", receiverRecord);
                messageProcessor.processMessage(receiverRecord);
                receiverRecord.receiverOffset().acknowledge();
                log.info("Message acknowledged");
            })
            .doOnError(e -> log.error("ERRRRRRROOORRRRRR"))
            .retryWhen(Retry.backoff(3, Duration.ofSeconds(5)).maxBackoff(Duration.ofSeconds(20)).transientErrors(true))
            .onErrorResume(e -> {
                // code to handle retry exhaustion
            })
        ).repeat()
    )
    .subscribeOn(scheduler)
    .subscribe();

Akka.NET: Dead Letters to Remote Actor

I am learning how to call a remote actor from a different machine. To simulate two different machines I have a host machine and a virtual machine (VM). The network adapter is set to NAT because with this setting I am able to ping the host machine from the VM (I read that it should be set to Bridged, but then the ping command timed out).
Host IP: 172.16.104.242
VM IP: 10.0.2.15
That aside, this is the code for RemoteActor.fsx on the host machine:
#r "nuget: Akka.FSharp"
#r "nuget: Akka.Remote"
open System
open Akka.Actor
open Akka.Configuration
open Akka.FSharp
let config =
    Configuration.parse
        @"akka {
            actor.provider = ""Akka.Remote.RemoteActorRefProvider, Akka.Remote""
            remote.helios.tcp {
                hostname = 172.16.104.242
                port = 9001
            }
        }"

let system = System.create "RemoteFSharp" config

let echoServer =
    spawn system "EchoServer"
    <| fun mailbox ->
        let rec loop() =
            actor {
                let! message = mailbox.Receive()
                let sender = mailbox.Sender()
                printfn "echoServer called"
                match box message with
                | :? string ->
                    sender <! sprintf "Echo: %s" message
                    return! loop()
                | _ -> failwith "Unknown message"
            }
        loop()
I first executed this script and this is the output
This is the code for LocalActor.fsx on the VM:
#r "nuget: Akka.FSharp"
#r "nuget: Akka.Remote"
open System
open Akka.Actor
open Akka.Configuration
open Akka.FSharp
let configuration =
    ConfigurationFactory.ParseString(
        @"akka {
            actor {
                provider = ""Akka.Remote.RemoteActorRefProvider, Akka.Remote""
                deployment {
                    /remoteecho {
                        remote = ""akka.tcp://RemoteFSharp@172.16.104.242:9001""
                    }
                }
            }
            remote {
                helios.tcp {
                    port = 0
                    hostname = 10.0.2.15
                }
            }
        }")

let system = ActorSystem.Create("RemoteFSharp", configuration)
let echoClient = system.ActorSelection("akka.tcp://RemoteFSharp@172.16.104.242:9001/EchoServer")
let task = echoClient <? "F#!"
let response = Async.RunSynchronously (task, 1000)
printfn "Reply from remote %s" (string(response))
This is the output for this
Now the RemoteActor.fsx throws this error
I found a few posts on Stack Overflow with this same error but couldn't figure out the fix. Apparently the error occurs because the RemoteActor dies before the LocalActor sends the message. Also, after running the RemoteActor.fsx script, if I type echoServer <! "Hello" in the RemoteActor terminal, I get the same error.
Any idea how to fix this? Any help would be greatly appreciated! Thank you!
Change this line
let echoClient = system.ActorSelection("akka.tcp://RemoteFSharp@172.16.104.242:9001/EchoServer")
To
let echoClient = system.ActorSelection("akka.tcp://RemoteFSharp@172.16.104.242:9001/user/EchoServer")
All user-defined actors exist under the /user actor root.

how to use gun:open in a gen_server module

I have a gen_server module. I use gun as an HTTP client to keep a long-polling connection to an HTTP server, so I call gun:open in my module's init; but if gun:open fails, my module fails, and so my application fails to start. What is the proper way to do this? The following is my code:
init() ->
    lager:debug("http_api_client: connecting to admin server...~n"),
    {ok, ConnPid} = gun:open("localhost", 5001),
    {ok, Protocol} = gun:await_up(ConnPid),
    {ok, #state{conn_pid = ConnPid, streams = #{}, protocol = Protocol}}.
Basically you have two options: either your process requires the HTTP server to be available (your current solution), or it doesn't, and it gracefully handles requests while the connection to the HTTP server is down (by returning error responses). This blog post presents the idea more eloquently: https://ferd.ca/it-s-about-the-guarantees.html
You could do that by separating this code out into a separate function that doesn't crash if the connection fails:
try_connect(State) ->
    lager:debug("http_api_client: connecting to admin server...~n"),
    case gun:open("localhost", 5001) of
        {ok, ConnPid} ->
            %% gun:open usually succeeds even when nothing is listening;
            %% the actual connection failure shows up in gun:await_up.
            case gun:await_up(ConnPid) of
                {ok, Protocol} ->
                    State#state{conn_pid = ConnPid, streams = #{}, protocol = Protocol};
                {error, _} ->
                    gun:close(ConnPid),
                    State#state{conn_pid = undefined}
            end;
        {error, _} ->
            State#state{conn_pid = undefined}
    end.
And call this function from init. That is, regardless of whether you can connect, your gen_server will start.
init(_) ->
    {ok, try_connect(#state{})}.
Then, when you make a request to this gen_server that requires the connection to be present, check whether it is undefined:
handle_call(foo, _, State = #state{conn_pid = undefined}) ->
    {reply, {error, not_connected}, State};
handle_call(foo, _, State = #state{conn_pid = ConnPid}) ->
    %% make a request through ConnPid here
    {reply, ok, State};
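For example (a purely hypothetical caller; the registered name http_api_client and the fallback function are assumptions), the caller then gets an error tuple instead of a crash:

case gen_server:call(http_api_client, foo) of
    {error, not_connected} ->
        %% hypothetical fallback while the admin server is unreachable
        handle_offline();
    ok ->
        ok
end.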
Of course, that means that if the connection fails at startup, your gen_server will never try to connect again. You could add a timer, or you could add an explicit reconnect command:
handle_call(reconnect, _, State = #state{conn_pid = undefined}) ->
    NewState = try_connect(State),
    Result = case NewState of
                 #state{conn_pid = undefined} ->
                     reconnect_failed;
                 _ ->
                     ok
             end,
    {reply, Result, NewState};
handle_call(reconnect, _, State) ->
    {reply, already_connected, State}.
The code above doesn't handle the case when the connection goes down while the gen_server is running. You could handle that explicitly, or you could just let your gen_server process crash in that case, so that it restarts into the "not connected" state.
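As a rough sketch of the explicit route (assuming try_connect/1 is extended to call erlang:monitor(process, ConnPid) after a successful connect, which the code above does not do yet), handle_info can drop back to the disconnected state and retry:

%% Sketch only: relies on a monitor being set on the gun connection process.
handle_info({'DOWN', _MRef, process, ConnPid, _Reason},
            State = #state{conn_pid = ConnPid}) ->
    %% The connection process died: forget it and try to reconnect once.
    %% A timer via erlang:send_after/3 would give a gentler retry policy.
    {noreply, try_connect(State#state{conn_pid = undefined})};
handle_info(_Other, State) ->
    {noreply, State}.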

Issue when registering two local processes with gproc within a cowboy websocket handler

I'm trying to register a bunch of processes under a unique family name with gproc, inside a cowboy websocket handler.
In my handler I created two websocket_handle clauses; the first one handles registration:
websocket_handle({text, <<"Reg: ",Message/binary>>}, State) ->
io:format("Client ~p requesting to register ~n",[Message]),
MyPID=list_to_binary(pid_to_list(self())),
{[{_,Family}]}=jiffy:decode(Message),
io:format("Client ~p requesting to register ~n",[Family]),
Test = gproc:reg({p, l, Family}),
erlang:display(Test),
io:format("Registration OK, replying ..."),
Result = gproc:lookup_pids({p, l, Family}),
erlang:display(Result),
[PID] = Result,
io:format("PASS ~n"),
io:format("PID ~p FORMATTED ~n",[PID]),
Res= list_to_binary(pid_to_list(PID)),
\"inform\",\"From\" : \"Server\",\"Message\" : \"How you are doing !\"}">>),
{reply, {text,<<"{\"Type\" : \"fb_server\",\"Action\" : \"registration\",\"From\" : \"Server\",\"Message\" : \"",Res/binary,"\"}">>}, State};
The second one handles pid retrieval:
websocket_handle({text, <<"Get: ",Message/binary>>}, State) ->
io:format("Client ~p requesting Pids ~n",[Message]),
{[{_,Family}]}=jiffy:decode(Message),
Result = gproc:lookup_pids({p, l, Family}),
erlang:display(Result),
if
Result == [] ->
{reply, {text,<<"{\"Type\" : \"fb_server\",\"Action\" : \"Get Pids\",\"From\" : \"Server\",\"Message\" : \"Empty list\"}">>}, State};
true ->
[PID] = Result,
io:format("PASS ~n"),
io:format("PID ~p FORMATTED ~n",[PID]),
Res= list_to_binary(pid_to_list(PID)),
\"fb_server\",\"Action\" : \"inform\",\"From\" : \"Server\",\"Message\" : \"How you are doing !\"}">>),
{reply, {text,<<"{\"Type\" : \"fb_server\",\"Action\" : \"Get Pids\",\"From\" : \"Server\",\"Message\" : \"",Res/binary,"\"}">>}, State}
end.
To test my handler I created two JS files. The first one registers a process family; I start the registration request as follows:
writeToScreen("CONNECTED");
var msg = {family: "Js"};
websocket.send("Reg: "+JSON.stringify(msg) );
The second test file gets the pid of the process already registered by the first file:
function onOpen(evt)
{
    // On opening the connection we send a getPids request to get the pids of
    // processes registered under family "Js"
    writeToScreen("CONNECTED");
    var msg = {family: "Js"};
    //websocket.send("Reg: " + JSON.stringify(msg));
    getPids(msg);
    //doSend("WebSocket rocks");
}

function getPids(msg)
{
    writeToScreen("get Pids");
    websocket.send("Get: " + JSON.stringify(msg));
}
My problem is that the first file registers the process successfully, but the second one gets an empty list. Shouldn't it get a list containing the pid already registered by the first file?
Best regards.
@Stefan Zobel, you are right: in my onmessage event handler I have a call to the onclose() event.
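For context, a minimal sketch (not from the original thread) of why this happens: gproc ties a property registration to the registering process, so as soon as the websocket handler process exits, for example because the first page closed its connection, the lookup returns an empty list again:

%% Run in a shell with gproc started: application:ensure_all_started(gproc).
demo() ->
    Registrar = spawn(fun() ->
                          true = gproc:reg({p, l, <<"Js">>}),
                          receive stop -> ok end
                      end),
    timer:sleep(100),
    [Registrar] = gproc:lookup_pids({p, l, <<"Js">>}),  %% visible while alive
    Registrar ! stop,
    timer:sleep(100),
    [] = gproc:lookup_pids({p, l, <<"Js">>}).           %% gone after the exit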

Erlang drop messages

I have a proxy that prevents the server from getting overloaded with requests.
Clients send their requests to the proxy, and the proxy determines whether or not to pass them on to the server.
NrOfReq is the current number of requests that the server is handling.
MaxReq is the maximum number of requests that the server can handle before its mailbox gets full.
Every time the server has handled a request, it sends the ready_to_serve atom to the proxy.
Whenever the guard after the when keyword is false, I want to drop the message from the client and prevent it from ending up in the proxy's mailbox.
How can I do this?
proxy(ServerPid, NrOfReq, MaxReq) ->
    receive
        {client_request, Request, ClientPid} when NrOfReq < MaxReq ->
            New = NrOfReq + 1,
            ServerPid ! {Request, ClientPid, self()};
        ready_to_serve ->
            New = NrOfReq - 1
    end,
    proxy(ServerPid, New, MaxReq).
Separate message receiving from message handling. You can't stop a sender from putting a message into the proxy's mailbox, but you can always receive it and simply discard it:
proxy(ServerPid, NrOfReq, MaxReq) ->
    receive
        {client_request, Request, ClientPid} ->
            if
                NrOfReq < MaxReq ->
                    New = NrOfReq + 1,
                    ServerPid ! {Request, ClientPid, self()};
                true ->
                    %% message dropped; the request count is unchanged
                    New = NrOfReq
            end;
        ready_to_serve ->
            New = NrOfReq - 1
    end,
    proxy(ServerPid, New, MaxReq).
Or else add a fallback clause for dropping messages:
proxy(ServerPid, NrOfReq, MaxReq) ->
    receive
        {client_request, Request, ClientPid} when NrOfReq < MaxReq ->
            New = NrOfReq + 1,
            ServerPid ! {Request, ClientPid, self()};
        {client_request, _Request, _ClientPid} when NrOfReq >= MaxReq ->
            %% message dropped; the request count is unchanged
            New = NrOfReq;
        ready_to_serve ->
            New = NrOfReq - 1
    end,
    proxy(ServerPid, New, MaxReq).
The latter is a bit flatter, but it can cause issues when you decide to change the message format.
PS: Why you no use OTP?
I never tried your proposal; on my side I would use two clauses for the loop:
proxy(ServerPid, MaxReq, MaxReq) ->
    receive
        ready_to_serve ->
            New = MaxReq - 1
    end,
    proxy(ServerPid, New, MaxReq);
proxy(ServerPid, NrOfReq, MaxReq) ->
    receive
        {client_request, Request, ClientPid} ->
            New = NrOfReq + 1,
            ServerPid ! {Request, ClientPid, self()};
        ready_to_serve ->
            New = NrOfReq - 1
    end,
    proxy(ServerPid, New, MaxReq).
This proxy looks weird to me. The pattern I saw most often in Erlang is to spawn 1 server per client, and have dedicated processes to spawn those servers, manage storage ...
