High erlang process memory executing mnesia:dirty_select/2

I have an mnesia table gen_saga_persist which is queried by a process every 1 second.
The function used is mnesia:dirty_select(gen_saga_persist, [{#gen_saga_persist{id = '$1', app = undefined, partition = 33, execution_type = auto, _ = '_'}, [], ['$1']}])
Now, as the table grows, I observe that the memory of the process querying this table also grows. Can anyone help me understand why my process memory is increasing (mnesia newbie here)? I am not even storing the result in my process state or anywhere else.
Mnesia table info:
[{access_mode,read_write},
{active_replicas,[server#localhost]},
{all_nodes,[server#localhost]},
{arity,12},
{attributes,[id,partition,app,type,subtype,name,status,
execution_type,retry_count,update_time,controller_state]},
{checkpoints,[]},
{commit_work,[{index,ordered_set,
[{{9,ordered},{ram,#Ref<0.3472765205.1199964162.76896>}},
{{8,ordered},{ram,#Ref<0.3472765205.1199964162.76882>}},
{{7,ordered},{ram,#Ref<0.3472765205.1199964163.74776>}},
{{6,ordered},{ram,#Ref<0.3472765205.1199964162.76856>}},
{{5,ordered},{ram,#Ref<0.3472765205.1199964161.79429>}},
{{4,ordered},{ram,#Ref<0.3472765205.1199964161.79409>}},
{{3,ordered},{ram,#Ref<0.3472765205.1199964162.76810>}}]}]},
{cookie,{{1632211298276938000,-576460752303418559,1},
server#localhost}},
{cstruct,{cstruct,gen_saga_persist,ordered_set,[],
[server#localhost],
[],[],0,read_write,false,
[{3,ordered},
{4,ordered},
{5,ordered},
{6,ordered},
{7,ordered},
{8,ordered},
{9,ordered}],
[],false,gen_saga_persist,
[id,partition,app,type,subtype,name,status,execution_type,
retry_count,update_time,controller_state],
[],[],[],
{{1632211298276938000,-576460752303418559,1},
server#localhost},
{{2,0},[]}}},
{disc_copies,[server#localhost]},
{disc_only_copies,[]},
{external_copies,[]},
{frag_properties,[]},
{index,[9,8,7,6,5,4,3]},
{index_info,{index,ordered_set,
[{{9,ordered},{ram,#Ref<0.3472765205.1199964162.76896>}},
{{8,ordered},{ram,#Ref<0.3472765205.1199964162.76882>}},
{{7,ordered},{ram,#Ref<0.3472765205.1199964163.74776>}},
{{6,ordered},{ram,#Ref<0.3472765205.1199964162.76856>}},
{{5,ordered},{ram,#Ref<0.3472765205.1199964161.79429>}},
{{4,ordered},{ram,#Ref<0.3472765205.1199964161.79409>}},
{{3,ordered},{ram,#Ref<0.3472765205.1199964162.76810>}}]}},
{load_by_force,false},
{load_node,server#localhost},
{load_order,0},
{load_reason,local_only},
{local_content,false},
{majority,false},
{master_nodes,[]},
{memory,2455944},
{ram_copies,[]},
{record_name,gen_saga_persist},
{record_validation,{gen_saga_persist,12,ordered_set}},
{size,7747},
{snmp,[]},
{storage_properties,[]},
{storage_type,disc_copies},
{subscribers,[]},
{type,ordered_set},
{user_properties,[]},
{version,{{2,0},[]}},
{where_to_commit,[{server#localhost,disc_copies}]},
{where_to_read,server#localhost},
{where_to_wlock,{[server#localhost],false}},
{where_to_write,[server#localhost]},
{wild_pattern,#gen_saga_persist{id = '_',partition = '_',
app = '_',type = '_',subtype = '_',name = '_',status = '_',
execution_type = '_',retry_count = '_',update_time = '_',
controller_state = '_'}},
{{index,3},#Ref<0.3472765205.1199964162.76810>},
{{index,4},#Ref<0.3472765205.1199964161.79409>},
{{index,5},#Ref<0.3472765205.1199964161.79429>},
{{index,6},#Ref<0.3472765205.1199964162.76856>},
{{index,7},#Ref<0.3472765205.1199964163.74776>},
{{index,8},#Ref<0.3472765205.1199964162.76882>},
{{index,9},#Ref<0.3472765205.1199964162.76896>}]
Process info:
[{current_function,{timer,sleep,1}},
{initial_call,{bin_updates_dispatcher,poll_func,3}},
{status,waiting},
{message_queue_len,0},
{links,[]},
{dictionary,[]},
{trap_exit,false},
{error_handler,error_handler},
{priority,normal},
{group_leader,<105603.71.0>},
{total_heap_size,4781410},
{heap_size,1199557},
{stack_size,6},
{reductions,155758726},
{garbage_collection,[{max_heap_size,#{error_logger => true,kill => true,size => 0}},
{min_bin_vheap_size,46422},
{min_heap_size,233},
{fullsweep_after,65535},
{minor_gcs,11}]},
{suspending,[]}]
Process function:
poll_gen_saga_persist_table(Delay) ->
    Pid = spawn(bin_updates_dispatcher, poll_func, [Delay, gen_saga_persist,
            [{#gen_saga_persist{
                id = '$1', app = undefined, partition = 33, execution_type = auto, _ = '_'
            }, [], ['$1']}]]),
    unlink(Pid),
    Pid.

poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]) ->
    mnesia:dirty_select(TableName, [{MatchSpec, Guards, Return}]),
    timer:sleep(Delay),
    poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]).

I think it is not related to mnesia; it is caused by your poll_func, which runs forever in a self-loop:
poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]) ->
    mnesia:dirty_select(TableName, [{MatchSpec, Guards, Return}]),
    timer:sleep(Delay),
    poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]). %% <--- it can't release any memory.
You can rewrite the following code as a gen_server, driving each poll with a timer:send_after message:
Pid = spawn(bin_updates_dispatcher, poll_func, [Delay, gen_saga_persist,
        [{#gen_saga_persist{
            id = '$1', app = undefined, partition = 33, execution_type = auto, _ = '_'
        }, [], ['$1']}]]),
and simplify poll_func:
poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]) ->
    mnesia:dirty_select(TableName, [{MatchSpec, Guards, Return}]).
That way, the memory allocated by dirty_select in the process is released once each call completes and can be reused on the next poll.
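For illustration, here is a minimal sketch of that suggestion (the module name gen_saga_poller and its callbacks are made up for this example, not the poster's actual code; the #gen_saga_persist record definition is assumed to come from the project's own header):

-module(gen_saga_poller).
-behaviour(gen_server).
-export([start_link/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2]).

%% NOTE: the #gen_saga_persist record is assumed to be included from the
%% project's own header file.

start_link(Delay) ->
    gen_server:start_link(?MODULE, Delay, []).

init(Delay) ->
    %% schedule the first poll; the state is just the delay in milliseconds
    timer:send_after(Delay, poll),
    {ok, Delay}.

handle_info(poll, Delay) ->
    %% run the query, drop the result, and schedule the next poll
    _ = mnesia:dirty_select(gen_saga_persist,
                            [{#gen_saga_persist{id = '$1', app = undefined,
                                                partition = 33,
                                                execution_type = auto, _ = '_'},
                              [], ['$1']}]),
    timer:send_after(Delay, poll),
    {noreply, Delay}.

handle_call(_Request, _From, State) ->
    {reply, ok, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

timer:send_after/2 sends the given message to the calling process after the delay, so each poll is handled as an ordinary handle_info callback instead of a never-returning recursive call.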

You need to add a stop condition to this function:
poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]) ->
    mnesia:dirty_select(TableName, [{MatchSpec, Guards, Return}]),
    timer:sleep(Delay),
    poll_func(Delay, TableName, [{MatchSpec, Guards, Return}]).
Because you call poll_func again at the end, it loops forever without a stop condition.

Related

Ejabberd - Route message to users in offline_message_hook

I want to build a customer support chat app. There are users and an admin, and below the admin there are multiple sub-admins. Initially the chat is initiated with the admin only, but if the admin is offline I need to route the message to the sub-admins.
The offline_message_hook hook serves this purpose. I'll check whether To is the admin, and if so I need to route the Packet to one of the sub-admins. How do I route/send the packet to another user within offline_message_hook? In short, how do I change the To of the packet so that it is redirected to the new sub-admin?
Here is what I've tried:-
offline_message_hook({_Action, #message{from = Peer, to = To} = Pkt} = Acc) ->
    ?INFO_MSG("Inside offline", []),
    ejabberd_router:route(From, To, Packet),
    ok.
I'm using ejabberd 17.04.105.
Update
After following user2610053's advice, I did this:-
-spec offline_message_hook({any(), message()}) -> {any(), message()}.
offline_message_hook({_Action, Msg} = Acc) ->
    ejabberd_router:route(xmpp:set_to(Msg, 'praful@localhost')),
    {routed, Msg}.
Following is the error:-
15:13:12.291 [error] failed to route packet:
#message{id = <<"purple187f6502">>,type = chat,lang = <<"en">>,
from = {jid,<<"praful2">>,<<"localhost">>,<<"Prafuls-MacBook-Pro">>,
<<"praful2">>,<<"localhost">>,<<"Prafuls-MacBook-Pro">>},
to = praful@localhost,subject = [],
body = [#text{lang = <<>>,data = <<"co=umon">>}],
thread = undefined,
sub_els = [{xmlel,<<"active">>,
[{<<"xmlns">>,
<<"http://jabber.org/protocol/chatstates">>}],
[]}],
meta = #{ip => {0,0,0,0,0,0,0,1}}}
Reason = {error,{{badrecord,jid},[{ejabberd_router,do_route,1,[{file,"src/ejabberd_router.erl"},{line,343}]},{ejabberd_router,route,1,[{file,"src/ejabberd_router.erl"},{line,87}]},{mod_sunshine,offline_message_hook,1,[{file,"src/mod_sunshine.erl"},{line,24}]},{ejabberd_hooks,safe_apply,4,[{file,"src/ejabberd_hooks.erl"},{line,380}]},{ejabberd_hooks,run_fold1,4,[{file,"src/ejabberd_hooks.erl"},{line,364}]},{ejabberd_sm,route,1,[{file,"src/ejabberd_sm.erl"},{line,138}]},{ejabberd_local,route,1,[{file,"src/ejabberd_local.erl"},{line,116}]},{ejabberd_router,do_route,1,[{file,"src/ejabberd_router.erl"},{line,348}]}]}}
The user praful@localhost exists. Please advise what exactly is wrong?
Update 2 - UserReceivePacket Hook
In the user_receive_packet hook, upon using the same function, ejabberd_router:route(xmpp:set_to(Packet, jid:decode("praful@localhost"))), it throws an error saying:-
Hook user_receive_packet crashed when running mod_sunshine:user_receive_packet/1:
** Reason = {error,function_clause,[{jid,decode,[{file,"src/jid.erl"},{line,132}],["praful#localhost"]},{mod_sunshine,user_receive_packet,[{file,"src/mod_sunshine.erl"},{line,29}],1},{ejabberd_hooks,safe_apply,[{file,"src/ejabberd_hooks.erl"},{line,380}],4},{ejabberd_hooks,run_fold1,[{file,"src/ejabberd_hooks.erl"},{line,364}],4},{ejabberd_c2s,process_info,[{file,"src/ejabberd_c2s.erl"},{line,231}],2},{ejabberd_hooks,safe_apply,[{file,"src/ejabberd_hooks.erl"},{line,380}],4},{ejabberd_hooks,run_fold1,[{file,"src/ejabberd_hooks.erl"},{line,364}],4},{xmpp_stream_in,handle_info,[{file,"src/xmpp_stream_in.erl"},{line,373}],2}]}
So, I read about function_clause, but couldn't understand it. What exactly is wrong over here?
I think you're asking about xmpp:set_to/2. Here is an example:
offline_message_hook({_Action, Msg} = Acc) ->
    SubAdmins = get_sub_admins(Msg#message.to),
    lists:foreach(
        fun(Admin) ->
            ejabberd_router:route(xmpp:set_to(Msg, Admin))
        end, SubAdmins),
    {routed, Msg}.
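Regarding the errors in the updates: the badrecord error suggests xmpp:set_to/2 needs a #jid{} record rather than an atom, and the function_clause from jid:decode/1 suggests it wants a binary rather than a string. A minimal sketch with a hypothetical hard-coded sub-admin JID (subadmin@localhost is just a placeholder) might look like this:

offline_message_hook({_Action, Msg} = Acc) ->
    %% jid:decode/1 takes a binary JID and returns a #jid{} record
    SubAdmin = jid:decode(<<"subadmin@localhost">>),
    ejabberd_router:route(xmpp:set_to(Msg, SubAdmin)),
    {routed, Msg}.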

Erlang supervisor dynamic change to restart intensity

My question is, can one modify the restart intensity thresholds of an already running supervisor, apart from in a release upgrade scenario, and if so, how?
It's never come up before, but I'm running a supervisor with initially no children, so that another process starts children by way of supervisor:start_child/2. My supervisor's init/1 looks like this:
init([]) ->
    RestartSt = {simple_one_for_one, 10, 10},
    ChSpec = [{foo, {foo, start_link, []}, transient, 1000, worker, [foo]}],
    {ok, {RestartSt, ChSpec}}.
At the time of supervisor start, the likely number of children is unknown; certainly it could vary dramatically from 10, to 10,000, or more.
A restart intensity of say 20 is generous enough for 10 children, but for say 10,000 children I would like to be able to increase it... and decrease it as the number of children drops due to normal terminations.
There's no API for doing this, so I believe you're stuck with the upgrade approach unless you want to propose a new API for this to the OTP team by submitting a pull request providing a complete patch with code changes, new tests, and documentation changes.
There's also a really dirty hack way of doing this that involves manipulating internal supervisor state, and so it's absolutely not something I would recommend for a production system but I think it's still interesting to look at. A supervisor stores restart intensity in its internal loop state. You can see this state by calling sys:get_state/1,2 on a supervisor process. For example, here's the state of a supervisor in the Yaws web server:
1> rr(supervisor).
[child,state]
2> sys:get_state(yaws_sup).
#state{name = {local,yaws_sup},
strategy = one_for_all,
children = [#child{pid = <0.67.0>,name = yaws_sup_restarts,
mfargs = {yaws_sup_restarts,start_link,[]},
restart_type = transient,shutdown = infinity,
child_type = supervisor,
modules = [yaws_sup_restarts]},
#child{pid = <0.42.0>,name = yaws_server,
mfargs = {yaws_server,start_link,
[{env,true,false,false,false,false,false,"default"}]},
restart_type = permanent,shutdown = 120000,
child_type = worker,
modules = [yaws_server]},
#child{pid = <0.39.0>,name = yaws_trace,
mfargs = {yaws_trace,start_link,[]},
restart_type = permanent,shutdown = 5000,
child_type = worker,
modules = [yaws_trace]},
#child{pid = <0.36.0>,name = yaws_log,
mfargs = {yaws_log,start_link,[]},
restart_type = permanent,shutdown = 5000,
child_type = worker,
modules = [yaws_log]}],
dynamics = undefined,intensity = 0,period = 1,restarts = [],
module = yaws_sup,args = []}
The initial rr command retrieves the record definitions from supervisor so we can see the field names when we get the state from yaws_sup, otherwise we would just get a tuple full of anonymous values.
The retrieved state shows the intensity in this case to be 0. We can change it using sys:replace_state/2,3:
3> sys:replace_state(yaws_sup, fun(S) -> S#state{intensity=2} end).
#state{name = {local,yaws_sup},
strategy = one_for_all,
children = [#child{pid = <0.67.0>,name = yaws_sup_restarts,
mfargs = {yaws_sup_restarts,start_link,[]},
restart_type = transient,shutdown = infinity,
child_type = supervisor,
modules = [yaws_sup_restarts]},
#child{pid = <0.42.0>,name = yaws_server,
mfargs = {yaws_server,start_link,
[{env,true,false,false,false,false,false,"default"}]},
restart_type = permanent,shutdown = 120000,
child_type = worker,
modules = [yaws_server]},
#child{pid = <0.39.0>,name = yaws_trace,
mfargs = {yaws_trace,start_link,[]},
restart_type = permanent,shutdown = 5000,
child_type = worker,
modules = [yaws_trace]},
#child{pid = <0.36.0>,name = yaws_log,
mfargs = {yaws_log,start_link,[]},
restart_type = permanent,shutdown = 5000,
child_type = worker,
modules = [yaws_log]}],
dynamics = undefined,intensity = 2,period = 1,restarts = [],
module = yaws_sup,args = []}
The second argument to sys:replace_state/2 is a function that takes the state record and changes its intensity field to 2. The sys:replace_state/2,3 functions return the new state, and as you can see near the end of the result here, intensity is now 2 instead of 0.
As the sys:replace_state/2,3 documentation explains, these functions are intended only for debugging purposes, so using them to do this in a production system is definitely not something I recommend. The second argument to replace_state here shows that this approach requires knowledge of the details of the internal state record of supervisor, which we obtained here via the rr shell command, so if that record ever changes, this code may stop working. Even more fragile would be treating the supervisor state record as a tuple and counting on the intensity field to be in a particular tuple position so you can change its value. Therefore, if you really want this functionality of changing a supervisor's restart intensity, you're best off in the long run proposing to the OTP team that it be added; if you're going to take that route, I recommend first proposing the idea on the erlang-questions mailing list to gauge interest.
One solution would be to nest your supervisors. But the main question is what you want to achieve with these restart intensities. Hitting the intensity at which the supervisor gives up and terminates should be an indication of something very wrong, e.g. a needed resource unexpectedly not being available.
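As a rough illustration of the nesting idea (worker_group_sup is a made-up module name; foo is the child module from the question), each group of children could get its own simple_one_for_one supervisor whose intensity is sized for that group:

-module(worker_group_sup).
-behaviour(supervisor).
-export([start_link/1, init/1]).

%% Intensity is chosen by the caller to match the expected size of this group.
start_link(Intensity) ->
    supervisor:start_link(?MODULE, Intensity).

init(Intensity) ->
    RestartSt = {simple_one_for_one, Intensity, 10},
    ChSpec = [{foo, {foo, start_link, []}, transient, 1000, worker, [foo]}],
    {ok, {RestartSt, ChSpec}}.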

Rewrite variable in Erlang

I am playing with records and lists. I want to know how to use one variable twice. When I assign a value to the variable _list and afterwards try to rewrite this variable, it raises this error:
** exception error: no match of right hand side value
-module(hello).
-author("anx00040").
-record(car, {evc, type, color}).
-record(person, {name, phone, addresa, rc}).
-record(driver, {rc, evc}).
-record(list, {cars = [], persons = [], drivers = []} ).
%% API
-export([helloIF/1, helloCase/1, helloResult/1, helloList/0, map/2, filter/2, helloListCaA/0, createCar/3, createPerson/4, createDriver/2, helloRecords/0, empty_list/0, any_data/0, del_Person/1, get_persons/1, do_it_hard/0, add_person/2]).
createCar(P_evc, P_type, P_color) ->
    _car = #car{evc = P_evc, type = P_type, color = P_color},
    _car.

createPerson(P_name, P_phone, P_addres, P_rc) ->
    _person = #person{name = P_name, phone = P_phone, addresa = P_addres, rc = P_rc},
    _person.

createDriver(P_evc, P_rc) ->
    _driver = #driver{rc = P_rc, evc = P_evc},
    _driver.

empty_list() ->
    #list{}.

any_data() ->
    _car1 = hello:createCar("BL 4", "Skoda octavia", "White"),
    _person1 = hello:createPerson("Eduard B.", "+421 917 111 711", "Kr, 81107 Bratislava1", "8811235"),
    _driver1 = hello:createDriver(_car1#car.evc, _person1#person.rc),
    _car2 = hello:createCar("BL 111 HK", "BMW M1", "Red"),
    _person2 = hello:createPerson("Lenka M", "+421 917 111 111", "Krizn0, 81107 Bratislava1", "8811167695"),
    _driver2 = hello:createDriver(_car2#car.evc, _person2#person.rc),
    _car3 = hello:createCar("BL 123 AB", "Audi A1 S", "Black"),
    _person3 = hello:createPerson("Stela Ba.", "+421 918 111 711", "Azna 20, 81107 Bratislava1", "8811167695"),
    _driver3 = hello:createDriver(_car3#car.evc, _person3#person.rc),
    _list = #list{
        cars = [_car1, _car2, _car3],
        persons = [_person1, _person2, _person3],
        drivers = [_driver1, _driver2, _driver3]},
    _list.

add_person(List, Person) ->
    List#list{persons = lists:append([Person], List#list.persons)}.

get_persons(#list{persons = P}) -> P.

do_it_hard() ->
    empty_list(),
    _list = add_person(any_data(), #person{name = "Test", phone = "+421Test", addresa = "Testova 20 81101 Testovo", rc = 88113545}),
    io:fwrite("\n"),
    get_persons(add_person(_list, #person{name = "Test2", phone = "+421Test2", addresa = "Testova 20 81101 Testovo2", rc = 991135455})).
But it raises an error when I use the variable _list twice:
do_it_hard() ->
    empty_list(),
    _list = add_person(any_data(), #person{name = "Test", phone = "+421Test", addresa = "Testova 20 81101 Testovo", rc = 88113545}),
    _list = add_person(_list, #person{name = "Test2", phone = "+421Test2", addresa = "Testova 20 81101 Testovo2", rc = 991135455}),
    get_persons(_list).
In the REPL, it can be convenient to experiment with things while re-using variable names. There, you can do f(A). to have Erlang "forget" the current assignment of A.
1> Result = connect("goooogle.com").
{error, "server not found"}
2> % oops! I misspelled the server name
2> f(Result).
ok
3> Result = connect("google.com").
{ok, <<"contents of the page">>}
Note that this is only a REPL convenience feature. You can't do this in actual code.
In actual code, variables can only be assigned once. In a procedural language (C, Java, Python, etc), the typical use-case for reassignment is loops:
for (int i = 0; i < max; i++) {
    conn = connect(servers[i]);
    reply = send_data(conn);
    print(reply);
}
In the above, the variables i, conn, and reply are reassigned in each iteration of the loop.
Functional languages use recursion to perform their loops:
send_all(Max, Servers) ->
    send_loop(1, Max, Servers).

send_loop(Current, Max, _Servers) when Current =:= Max ->
    ok;
send_loop(Current, Max, Servers) ->
    Conn = connect(lists:nth(Current, Servers)),
    Reply = send_data(Conn),
    print(Reply),
    send_loop(Current + 1, Max, Servers).
This isn't very idiomatic Erlang; I'm trying to make it mirror the procedural code above.
As you can see, I'm getting the same effect, but my assignments within a function are fixed.
As a side note, you are using a lot of variable names beginning with underscore. In Erlang this is a way of hinting that you will not be using the value of these variables. (Like in the above example, when I've reached the end of my list, I don't care about the list of servers.) Using a leading underscore as in your code turns off some useful compiler warnings and will confuse any other developers who look at your code.
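Applied to the question's do_it_hard/0, the usual approach is simply to bind each intermediate result to a fresh name (a sketch; the records and helper functions are the ones from the question):

do_it_hard() ->
    List0 = any_data(),
    List1 = add_person(List0, #person{name = "Test", phone = "+421Test",
                                      addresa = "Testova 20 81101 Testovo",
                                      rc = 88113545}),
    List2 = add_person(List1, #person{name = "Test2", phone = "+421Test2",
                                      addresa = "Testova 20 81101 Testovo2",
                                      rc = 991135455}),
    get_persons(List2).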
In some situations it is convenient to use SeqBind:
SeqBind is a parse transformation that auto-numbers all occurrences of bindings carrying the suffix @ (creating L@0, L@1, Req@0, Req@1, and so on).
Simple example:
...
-compile({parse_transform,seqbind}).
...
List@ = lists:seq(0, 100),
List@ = lists:filter(fun (X) -> X rem 2 == 0 end, List@)
...
I used google...
Erlang is a single-assignment language. That is, once a variable has been given a value, it cannot be given a different value. In this sense it is like algebra rather than like most conventional programming languages.
http://www.cis.upenn.edu/~matuszek/General/ConciseGuides/concise-erlang.html

Ejabberd 13.12 how to add element XMPP Packet?

I am using the ejabberd hook named filter_packet to make a module. Here I want to add an element to the packet. How do I do it? My code is:
on_filter_packet({From, To, Packet} = Input) ->
    Type = xml:get_tag_attr_s(list_to_binary("type"), Packet),
    if (Type == <<"groupchat">>) ->
        ?INFO_MSG("type is group chat", []),
        NPacket = {Packet, [{xmlelement, "time",
                             [],
                             [{xmlcdata, "testtime"}]}]},
        {From, To, NPacket};
    true ->
        Input
    end.
This code is giving a badmatch error. Any help?
13.12 uses a different type for xmlelement.
Packet is an #xmlel record, so you need to insert the new element into Packet#xmlel.children.
on_filter_packet({From, To, #xmlel{children = OldChildren} = Packet} = Input) ->
    ...
    TimeElem = #xmlel{name = <<"time">>,
                      children = [{xmlcdata, <<"testtime">>}]},
    NPacket = Packet#xmlel{children = [TimeElem | OldChildren]},
    ...
Not tested, but will work.
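Putting the two snippets together, a full version of the hook might look like this (a sketch only; it keeps the question's xml:get_tag_attr_s/2 call and assumes the #xmlel record shipped with ejabberd 13.12):

on_filter_packet({From, To, #xmlel{children = OldChildren} = Packet} = Input) ->
    case xml:get_tag_attr_s(<<"type">>, Packet) of
        <<"groupchat">> ->
            ?INFO_MSG("type is group chat", []),
            TimeElem = #xmlel{name = <<"time">>,
                              attrs = [],
                              children = [{xmlcdata, <<"testtime">>}]},
            {From, To, Packet#xmlel{children = [TimeElem | OldChildren]}};
        _ ->
            Input
    end.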

how to use mnesia:select/4 and mnesia:select/1 for paging query

a table named "md" with structure {id,name},I want read records from md use paging query,I tried mnesia:select/4 and mnesia:select/1 as below:
%% first use select/2: "ID < 10",returned [1,2,4,3,8,5,9,7,6]
(ejabberd#localhost)5> mnesia:activity(transaction,fun mnesia:select/2,md, [{{md,'$1','_'},[{'<','$1',10}],['$1']}]).
{atomic,[1,2,4,3,8,5,9,7,6]}
%% but when querying with select/4, it returned [6]. Why?
(ejabberd#localhost)7> {atomic,{R1,C1}}=mnesia:activity(transaction,fun mnesia:select/4,md,[{{md,'$1','_'},[{'<','$1',10}],['$1']}],5,read).
{atomic,{[6],
{mnesia_select,md,
{tid,10535470,<0.460.0>},
ejabberd#localhost,disc_only_copies,
{dets_cont,select,5,
<<0,0,0,29,18,52,86,120,0,0,0,21,131,104,3,...>>,
{141720,148792,<<>>},
md,<0.130.0>,<<>>},
[],undefined,undefined,
[{{md,'$1','_'},[{'<','$1',10}],['$1']}]}}}
%% and then, using mnesia:select/1 with the continuation C1, got wrong_transaction
(ejabberd#localhost)8> mnesia:activity(transaction,fun mnesia:select/1,C1).
{aborted,wrong_transaction}
How do I use mnesia:select/4 and mnesia:select/1 for a paging query?
You will have to call select/1 inside the same transaction.
Otherwise the table can change between the invocations of select/4 and select/1.
You must use a dirty context if you want to use it as written above.
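For example, staying inside one transaction might look roughly like this (the md table and match spec are borrowed from the question):

all_matching_ids(Limit) ->
    MatchSpec = [{{md, '$1', '_'}, [{'<', '$1', 10}], ['$1']}],
    F = fun() -> collect(mnesia:select(md, MatchSpec, Limit, read), []) end,
    mnesia:activity(transaction, F).

%% keep calling select/1 on the continuation until the table is exhausted
collect('$end_of_table', Acc) ->
    lists:append(lists:reverse(Acc));
collect({Page, Cont}, Acc) ->
    collect(mnesia:select(Cont), [Page | Acc]).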
Here is my solution: use async_dirty instead of transaction:
{Record, Cont} = mnesia:activity(async_dirty, fun mnesia:select/4, [md, [{Match_head, [Guard], [Result]}], Limit, read])
then read the next Limit records:
mnesia:activity(async_dirty, fun mnesia:select/1, [Cont])
full code:
-record(md, {id, name}).

batch_delete(Id, Limit) ->
    Match_head = #md{id = '$1', name = '$2'},
    Guard = {'<', '$1', Id},
    Result = '$_',
    {Record, Cont} = mnesia:activity(async_dirty, fun mnesia:select/4,
                                     [md, [{Match_head, [Guard], [Result]}], Limit, read]),
    delete_next({Record, Cont}).

delete_next('$end_of_table') ->
    over;
delete_next({Record, Cont}) ->
    delete(Record),
    delete_next(mnesia:activity(async_dirty, fun mnesia:select/1, [Cont])).

delete(Records) ->
    io:format("delete(~p)~n", [Records]),
    F = fun() ->
            [mnesia:delete_object(O) || O <- Records]
        end,
    mnesia:transaction(F).
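The same pattern works for plain paging (reading instead of deleting); a minimal sketch using the md record defined above:

first_page(Limit) ->
    MatchSpec = [{#md{id = '$1', name = '_'}, [], ['$_']}],
    mnesia:activity(async_dirty, fun mnesia:select/4, [md, MatchSpec, Limit, read]).

%% Each call returns {Records, NewCont} or '$end_of_table'.
next_page(Cont) ->
    mnesia:activity(async_dirty, fun mnesia:select/1, [Cont]).

Usage: {Page1, Cont1} = first_page(100), then {Page2, Cont2} = next_page(Cont1), and so on, until a call returns '$end_of_table'.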
