In the following code, the executeProg function executes a program given as a list of instructions ("PUSH ax", "POP bx", etc.).
//Execute a program as a list of operations. Note how pc is incremented
//so any jump instruction must be written carefully.
let mutable executeProg = fun (program:operation list) ->
pc <- 0
fault <- false
while not(fault) && pc<program.Length do
let instruction = program.[pc];
execute instruction
pc <- pc+1
// end of while by indentation
if not(fault) then printfn "top of stack holds value %d" (RAM.[sp-1]);
What I'm trying to do is modify executeProg so that it performs the following optimizations:
push ax
pop bx --> replace with single instruction mov ax bx
push ax
pop ax --> eliminate altogether
mov ax bx
mov bx ax --> mov ax bx
My question is: how would I implement a new executeProg modularly, without changing the original one?
My code is as follows:
let rec optimize = fun newProgram program ->
let mutable newProgram = newProgram
match program with
| (PUSH(Reg(a)):: POP(Reg(b)) :: r) when String.Equals(a,b) ->
optimize newProgram r
| (PUSH(Reg(a)):: POP(Reg(b)) :: r) ->
newProgram <- newProgram @ [MOV(Reg(a),Reg(b))]
optimize newProgram r
| (PUSH(Imm(a)):: POP(Imm(x)):: r) when a = x ->
optimize newProgram r
| (MOV(Reg(a), Reg(b)) :: MOV(Reg(b), Reg(a)) :: r) ->
newProgram <- newProgram @ [MOV(Reg(a),Reg(b))]
optimize newProgram r
| (a::r) ->
newProgram <- newProgram @ [a]
optimize newProgram r
| [] -> newProgram
I'm new to the F# language and I'm wondering if my syntax is correct.
I would be grateful for any help.
Thank you.
I don't think you need a mutable function. Perhaps you can change the design to use immutable, composable structures that will allow you to add new functionality to your solution.
I played with the problem a bit and here's what I got.
Declare types that you will use in the program:
type Register =
| AX
| BX
type Operand =
| Register of Register
| Int of int
type Command =
| MOV of Register * Operand
| PUSH of Operand
| POP of Operand
type FnBinaryOp = Register -> Operand -> Command
type FnUnaryOp = Operand -> Command
type Program = Command list
//two helper functions to save some typing
let i n = Int n
let r n = Register n
//helper functions to clarify the declaration syntax
let mov:FnBinaryOp =
fun reg op -> MOV (reg, op)
let push: FnUnaryOp = fun op -> PUSH (op)
let pop: FnUnaryOp = fun op -> POP (op)
Implement your optimization method - it returns the modified instruction list. This is where the code inefficiencies are rectified.
///optimize the program - remove cancelling stack commands, simplify MOV commands
let optimize (program:Program) =
let rec recOptimize stack prog =
match prog with
| PUSH (Register r1)::POP (Register r2)::xs when r1 = r2 -> //push ax; pop ax => nop
xs |> recOptimize stack
| PUSH (Register r1)::POP (Register r2)::xs when r1 <> r2 -> //push ax; pop bx => move ax bx
xs |> recOptimize (MOV(r1, Register(r2))::stack)
| PUSH (Int i1)::POP (Int i2)::xs when i1 = i2 -> //push 3; pop 3 => nop
xs |> recOptimize stack
| ((MOV (r1, Register r2 )) as op1)::((MOV (r3, Register r4 )) as op2)::xs when r1 = r4 && r2 = r3 -> //move ax bx; move bx ax => move ax bx
xs |> recOptimize (op1::stack)
| ((MOV (r1, Register r2 )) as op1)::((MOV (r3, Register r4 )) as op2)::xs when r1 = r3 && r2 = r4 -> //move ax bx; move ax bx => move ax bx
xs |> recOptimize (op1::stack)
| op::xs -> //all other cases - pass through
xs |> recOptimize (op::stack)
| [] -> List.rev stack // the accumulator is built in reverse; restore the original instruction order
program
|> recOptimize []
Implement the actual execution of your program (not implemented here)
let runProgram (program:Program) =
let rec run stack prog =
[]
program |> run []
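If you want something concrete to pipe the optimized program into, a minimal interpreter sketch could look like the one below. The semantics are my assumptions, not part of the original answer: MOV (src, Register dst) copies src into dst (matching the push/pop replacement above), PUSH pushes the operand's value, and POP (Register dst) pops the top of the stack into dst.
// Sketch only: registers live in a Map, the evaluation stack is a list.
let runProgramSketch (program: Program) =
    let valueOf regs op =
        match op with
        | Register r -> defaultArg (Map.tryFind r regs) 0
        | Int i -> i
    let step (regs, stack) cmd =
        match cmd with
        | MOV (src, Register dst) -> Map.add dst (valueOf regs (Register src)) regs, stack
        | MOV (_, Int _) -> regs, stack                 // moving into a literal: ignore
        | PUSH op -> regs, valueOf regs op :: stack
        | POP target ->
            match stack with
            | [] -> regs, []                            // popping an empty stack: ignore
            | top :: rest ->
                match target with
                | Register dst -> Map.add dst top regs, rest
                | Int _ -> regs, rest                   // popping into a literal: drop the value
    program |> List.fold step (Map.empty, [])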
And this is how you can invoke this:
let program:Program = [
mov AX (r BX)
push (r BX)
pop (r BX)
mov AX (r BX)
]
program
|> optimize
|> runProgram
If you need to add new functionality to the optimization step, you can just replace the optimize element of the composite call.
You can run multiple passes of optimize to cover cases where certain operations only become 'paired' after an earlier pass and you want to optimize them as well.
program
|> optimize
|> optimize
|> runProgram
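Alternatively, instead of hard-coding the number of passes, a small helper (my addition, not from the original answer) can re-run optimize until the program stops changing:
// keep optimizing until a fixed point is reached
// (relies on structural equality of Command list and on optimize preserving instruction order)
let rec optimizeFully (program: Program) =
    let next = optimize program
    if next = program then program
    else optimizeFully next

program
|> optimizeFully
|> runProgram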
Here are a few tests:
[<Fact>]
let ``push 42; pop 42 => nop`` () =
let program:Program = [
push (i 42)
pop (i 42)
]
let optimized = program |> optimize
//should be reduced to empty list
Assert.Empty(optimized)
[<Fact>]
let ``push ax; pop ax => nop`` () =
let program:Program = [
push (r AX)
pop (r AX)
]
let optimized = program |> optimize
//should be reduced to empty list
Assert.Empty(optimized)
[<Fact>]
let ``push ax; pop bx => move ax bx`` () =
let program:Program = [
push (r AX)
pop (r BX)
]
let optimized = program |> optimize
//should be reduced to a single element list mov ax bx
let expected = MOV( AX, (r BX))
let op = optimized.Head
Assert.Equal(1, optimized.Length)
Assert.Equal(expected, op)
[<Fact>]
let ``move ax bx; move bx ax => move ax bx`` () =
let program:Program = [
mov AX (r BX)
mov BX (r AX)
]
let optimized = program |> optimize
//should be reduced to a single element list mov ax bx
let expected = MOV( AX, (r BX))
let op = optimized.Head
Assert.Equal(1, optimized.Length)
Assert.Equal(expected, op)
[<Fact>]
let ``run optimize on program`` () =
let program:Program = [
mov AX (r BX)
push (r BX)
pop (r BX)
mov AX (r BX)
]
let optimized = program
|> optimize
|> optimize
|> optimize
//should be reduced to a single element list mov ax bx
let expected = MOV( AX, (r BX))
let op = optimized.Head
Assert.Equal(1, optimized.Length)
Assert.Equal(expected, op)
Originating from this question, I have this little F# code (github) to generate random values according to a normal distribution:
// val nextSingle : (unit -> float32)
let nextSingle =
let r = System.Random()
r.NextDouble >> float32
// val gauss : (float32 -> float32 -> seq<float32>)
let gauss mean stdDev =
let rec gauss ready = seq {
match ready with
| Some spare ->
yield spare * stdDev + mean
yield! gauss None
| _ ->
let rec loop () =
let u = nextSingle() * 2.f - 1.f
let v = nextSingle() * 2.f - 1.f
let s = pown u 2 + pown v 2
if s >= 1.f || s = 0.f then loop() else
u, v, s
let u, v, s = loop()
let mul = (*)(sqrt(-2.f * log s / s))
yield mul u * stdDev + mean
yield! mul v |> Some |> gauss
}
gauss None
To me it seems that this should only call itself in tail call position, ergo never cause a StackOverflowException when TCO is enabled. But it does when running 64-bit. It does not when running 32-bit (i.e. “Prefer 32-bit” checkbox in project settings).
I'm using .NET Framework 4.5.2 and F# 4.4.0.0.
Can somebody explain what is causing the problem?
Looks like a bug in the compiler's sequence expression compilation mechanism. Here's a simplified repro:
let rec loop r = seq {
if r > 0 then
let rec unused() = unused()
yield r
yield! loop r
}
printfn "%i" (Seq.nth 10000000 (loop 1))
Obviously the presence of the unused recursive definition shouldn't affect whether this generates a stack overflow, but it does.
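For comparison, hoisting the unused definition out of the sequence expression should make the overflow go away, which is consistent with the diagnosis above (this is just the repro rearranged, not a fix for the compiler):
let rec unused() = unused()
let rec loop2 r = seq {
    if r > 0 then
        yield r
        yield! loop2 r
}
printfn "%i" (Seq.nth 10000000 (loop2 1))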
I'm trying to implement Kosaraju's algorithm on a large graph
as part of an assignment [MOOC Algo I Stanford on Coursera]
https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm
The current code works on a small graph, but I'm hitting a StackOverflowException at runtime.
Despite having read the relevant chapter in Expert F#, and other available examples on websites and SO, I still don't get how to use continuations to solve this problem.
Below is the full code for reference, but it already fails when executing DFSLoop1 and the recursive function DFSsub inside it. I think I'm not making the function tail recursive [because of the instructions
t<-t+1
G.[n].finishingtime <- t
?]
but I don't understand how I can implement the continuation properly.
Considering only the part that fails: DFSLoop1 takes as its argument a graph to which we apply depth-first search. We need to record the finishing times as part of the algorithm in order to proceed to its second part, a second DFS loop (DFSLoop2) [of course we fail before that].
open System
open System.Collections.Generic
open System.IO
let x = File.ReadAllLines @"C:\Users\Fagui\Documents\GitHub\Learning Fsharp\Algo Stanford I\PA 4 - SCC.txt";;
// let x = File.ReadAllLines "C:\Users\Fagui\Documents\GitHub\Learning Fsharp\Algo Stanford I\PA 4 - test1.txt";;
// val x : string [] =
let splitAtTab (text:string)=
text.Split [|'\t';' '|]
let splitIntoKeyValue (A: int[]) =
(A.[0], A.[1])
let parseLine (line:string)=
line
|> splitAtTab
|> Array.filter (fun s -> not(s=""))
|> Array.map (fun s-> (int s))
|> splitIntoKeyValue
let y =
x |> Array.map parseLine
//val it : (int * int) []
type Children = int[]
type Node1 =
{children : Children ;
mutable finishingtime : int ;
mutable explored1 : bool ;
}
type Node2 =
{children : Children ;
mutable leader : int ;
mutable explored2 : bool ;
}
type DFSgraphcore = Dictionary<int,Children>
let directgraphcore = new DFSgraphcore()
let reversegraphcore = new DFSgraphcore()
type DFSgraph1 = Dictionary<int,Node1>
let reversegraph1 = new DFSgraph1()
type DFSgraph2 = Dictionary<int,Node2>
let directgraph2 = new DFSgraph2()
let AddtoGraph (G:DFSgraphcore) (n,c) =
if not(G.ContainsKey n) then
let node = [|c|]
G.Add(n,node)
else
let c'= G.[n]
G.Remove(n) |> ignore
G.Add (n, Array.append c' [|c|])
let inline swaptuple (a,b) = (b,a)
y|> Array.iter (AddtoGraph directgraphcore)
y|> Array.map swaptuple |> Array.iter (AddtoGraph reversegraphcore)
for i in directgraphcore.Keys do
if reversegraphcore.ContainsKey(i) then do
let node = {children = reversegraphcore.[i] ;
finishingtime = -1 ;
explored1 = false ;
}
reversegraph1.Add (i,node)
else
let node = {children = [||] ;
finishingtime = -1 ;
explored1 = false ;
}
reversegraph1.Add (i,node)
directgraphcore.Clear |> ignore
reversegraphcore.Clear |> ignore
// for i in reversegraph1.Keys do printfn "%d %A" i reversegraph1.[i].children
printfn "pause"
Console.ReadKey() |> ignore
let num_nodes =
directgraphcore |> Seq.length
let DFSLoop1 (G:DFSgraph1) =
let mutable t = 0
let mutable s = -1
let mutable k = num_nodes
let rec DFSsub (G:DFSgraph1)(n:int) (cont:int->int) =
//how to make it tail recursive ???
G.[n].explored1 <- true
// G.[n].leader <- s
for j in G.[n].children do
if not(G.[j].explored1) then DFSsub G j cont
t<-t+1
G.[n].finishingtime <- t
// end of DFSsub
for i in num_nodes .. -1 .. 1 do
printfn "%d" i
if not(G.[i].explored1) then do
s <- i
( DFSsub G i (fun s -> s) ) |> ignore
// printfn "%d %d" i G.[i].finishingtime
DFSLoop1 reversegraph1
printfn "pause"
Console.ReadKey() |> ignore
for i in directgraphcore.Keys do
let node = {children =
directgraphcore.[i]
|> Array.map (fun k -> reversegraph1.[k].finishingtime) ;
leader = -1 ;
explored2= false ;
}
directgraph2.Add (reversegraph1.[i].finishingtime,node)
let z = 0
let DFSLoop2 (G:DFSgraph2) =
let mutable t = 0
let mutable s = -1
let mutable k = num_nodes
let rec DFSsub (G:DFSgraph2)(n:int) (cont:int->int) =
G.[n].explored2 <- true
G.[n].leader <- s
for j in G.[n].children do
if not(G.[j].explored2) then DFSsub G j cont
t<-t+1
// G.[n].finishingtime <- t
// end of DFSsub
for i in num_nodes .. -1 .. 1 do
if not(G.[i].explored2) then do
s <- i
( DFSsub G i (fun s -> s) ) |> ignore
// printfn "%d %d" i G.[i].leader
DFSLoop2 directgraph2
printfn "pause"
Console.ReadKey() |> ignore
let table = [for i in directgraph2.Keys do yield directgraph2.[i].leader]
let results = table |> Seq.countBy id |> Seq.map snd |> Seq.toList |> List.sort |> List.rev
printfn "%A" results
printfn "pause"
Console.ReadKey() |> ignore
Here is a text file with a simple graph example
1 4
2 8
3 6
4 7
5 2
6 9
7 1
8 5
8 6
9 7
9 3
(the one which is causing the overflow is about 70 MB, with around 900,000 nodes)
EDIT
To clarify a few things first:
Here is the "pseudo code"
Input: a directed graph G = (V,E), in adjacency list representation. Assume that the vertices V are labeled
1, 2, 3, . . . , n.
1. Let Grev denote the graph G after the orientation of all arcs have been reversed.
2. Run the DFS-Loop subroutine on Grev, processing vertices according to the given order, to obtain a
finishing time f(v) for each vertex v ∈ V .
3. Run the DFS-Loop subroutine on G, processing vertices in decreasing order of f(v), to assign a leader
to each vertex v ∈ V .
4. The strongly connected components of G correspond to vertices of G that share a common leader.
Figure 2: The top level of our SCC algorithm. The f-values and leaders are computed in the first and second
calls to DFS-Loop, respectively (see below).
Input: a directed graph G = (V,E), in adjacency list representation.
1. Initialize a global variable t to 0.
[This keeps track of the number of vertices that have been fully explored.]
2. Initialize a global variable s to NULL.
[This keeps track of the vertex from which the last DFS call was invoked.]
3. For i = n downto 1:
[In the first call, vertices are labeled 1, 2, . . . , n arbitrarily. In the second call, vertices are labeled by
their f(v)-values from the first call.]
(a) if i not yet explored:
i. set s := i
ii. DFS(G, i)
Figure 3: The DFS-Loop subroutine.
Input: a directed graph G = (V,E), in adjacency list representation, and a source vertex i ∈ V .
1. Mark i as explored.
[It remains explored for the entire duration of the DFS-Loop call.]
2. Set leader(i) := s
3. For each arc (i, j) ∈ G:
(a) if j not yet explored:
i. DFS(G, j)
4. t + +
5. Set f(i) := t
Figure 4: The DFS subroutine. The f-values only need to be computed during the first call to DFS-Loop, and
the leader values only need to be computed during the second call to DFS-Loop.
EDIT
I have amended the code with the help of an experienced programmer (a Lisper, but with no F# experience), simplifying the first part somewhat so as to get to an example more quickly, without the code that is irrelevant to this discussion.
The code focuses on only half of the algorithm, running DFS once to get the finishing times of the reversed tree.
This is the first part of the code, just to create a small example.
y is the original tree: the first element of a tuple is the parent, the second is the child. But we will be working with the reversed tree.
open System
open System.Collections.Generic
open System.IO
let x = File.ReadAllLines @"C:\Users\Fagui\Documents\GitHub\Learning Fsharp\Algo Stanford I\PA 4 - SCC.txt";;
// let x = File.ReadAllLines "C:\Users\Fagui\Documents\GitHub\Learning Fsharp\Algo Stanford I\PA 4 - test1.txt";;
// val x : string [] =
let splitAtTab (text:string)=
text.Split [|'\t';' '|]
let splitIntoKeyValue (A: int[]) =
(A.[0], A.[1])
let parseLine (line:string)=
line
|> splitAtTab
|> Array.filter (fun s -> not(s=""))
|> Array.map (fun s-> (int s))
|> splitIntoKeyValue
// let y =
// x |> Array.map parseLine
//let y =
// [|(1, 4); (2, 8); (3, 6); (4, 7); (5, 2); (6, 9); (7, 1); (8, 5); (8, 6);
// (9, 7); (9, 3)|]
// let y = Array.append [|(1,1);(1,2);(2,3);(3,1)|] [|for i in 4 .. 10000 do yield (i,4)|]
let y = Array.append [|(1,1);(1,2);(2,3);(3,1)|] [|for i in 4 .. 99999 do yield (i,i+1)|]
//val it : (int * int) []
type Children = int list
type Node1 =
{children : Children ;
mutable finishingtime : int ;
mutable explored1 : bool ;
}
type Node2 =
{children : Children ;
mutable leader : int ;
mutable explored2 : bool ;
}
type DFSgraphcore = Dictionary<int,Children>
let directgraphcore = new DFSgraphcore()
let reversegraphcore = new DFSgraphcore()
type DFSgraph1 = Dictionary<int,Node1>
let reversegraph1 = new DFSgraph1()
let AddtoGraph (G:DFSgraphcore) (n,c) =
if not(G.ContainsKey n) then
let node = [c]
G.Add(n,node)
else
let c'= G.[n]
G.Remove(n) |> ignore
G.Add (n, List.append c' [c])
let inline swaptuple (a,b) = (b,a)
y|> Array.iter (AddtoGraph directgraphcore)
y|> Array.map swaptuple |> Array.iter (AddtoGraph reversegraphcore)
// define reversegraph1 = ... with ...
for i in reversegraphcore.Keys do
let node = {children = reversegraphcore.[i] ;
finishingtime = -1 ;
explored1 = false ;
}
reversegraph1.Add (i,node)
for i in directgraphcore.Keys do
if not(reversegraphcore.ContainsKey(i)) then do
let node = {children = [] ;
finishingtime = -1 ;
explored1 = false ;
}
reversegraph1.Add (i,node)
directgraphcore.Clear |> ignore
reversegraphcore.Clear |> ignore
// for i in reversegraph1.Keys do printfn "%d %A" i reversegraph1.[i].children
printfn "pause"
Console.ReadKey() |> ignore
let num_nodes =
directgraphcore |> Seq.length
So basically the graph is (1->2->3->1)::(4->5->6->7->8->....->99999->100000)
and the reverse graph is (1->3->2->1)::(100000->99999->....->4)
Here is the main code, written in direct style:
//////////////////// main code is below ///////////////////
let DFSLoop1 (G:DFSgraph1) =
let mutable t = 0
let mutable s = -1
let rec iter (n:int) (f:'a->unit) (list:'a list) : unit =
match list with
| [] -> (t <- t+1) ; (G.[n].finishingtime <- t)
| x::xs -> f x ; iter n f xs
let rec DFSsub (G:DFSgraph1) (n:int) : unit =
let my_f (j:int) : unit = if not(G.[j].explored1) then (DFSsub G j)
G.[n].explored1 <- true
iter n my_f G.[n].children
for i in num_nodes .. -1 .. 1 do
// printfn "%d" i
if not(G.[i].explored1) then do
s <- i
DFSsub G i
printfn "%d %d" i G.[i].finishingtime
// End of DFSLoop1
DFSLoop1 reversegraph1
printfn "pause"
Console.ReadKey() |> ignore
It's not tail recursive, so we use continuations. Here is the same code adapted to CPS style:
//////////////////// main code is below ///////////////////
let DFSLoop1 (G:DFSgraph1) =
let mutable t = 0
let mutable s = -1
let rec iter_c (n:int) (f_c:'a->(unit->'r)->'r) (list:'a list) (cont: unit->'r) : 'r =
match list with
| [] -> (t <- t+1) ; (G.[n].finishingtime <- t) ; cont()
| x::xs -> f_c x (fun ()-> iter_c n f_c xs cont)
let rec DFSsub (G:DFSgraph1) (n:int) (cont: unit->'r) : 'r=
let my_f_c (j:int)(cont:unit->'r):'r = if not(G.[j].explored1) then (DFSsub G j cont) else cont()
G.[n].explored1 <- true
iter_c n my_f_c G.[n].children cont
for i in maxnum_nodes .. -1 .. 1 do
// printfn "%d" i
if not(G.[i].explored1) then do
s <- i
DFSsub G i id
printfn "%d %d" i G.[i].finishingtime
DFSLoop1 reversegraph1
printfn "faré"
printfn "pause"
Console.ReadKey() |> ignore
Both versions compile and give the same results for the small example (the one in the comment), or for the same tree we are using but with a smaller size (1000 instead of 100000).
So I don't think there is a bug in the algorithm here: we've got the same tree structure, just a bigger tree causing problems. It looks to us like the continuations are well written; we've typed the code explicitly, and all calls end with a continuation in all cases...
We are looking for expert advice! Thanks!
I did not try to understand the whole code snippet, because it is fairly long, but you'll certainly need to replace the for loop with an iteration implemented using continuation passing style. Something like:
let rec iterc f cont list =
match list with
| [] -> cont ()
| x::xs -> f x (fun () -> iterc f cont xs)
I didn't understand the purpose of cont in your DFSsub function (it is never called, is it?), but the continuation-based version would look roughly like this:
let rec DFSsub (G:DFSgraph2)(n:int) cont =
G.[n].explored2 <- true
G.[n].leader <- s
G.[n].children
|> iterc
(fun j cont -> if not(G.[j].explored2) then DFSsub G j cont else cont ())
(fun () -> t <- t + 1)
Overflowing the stack when you recurse through hundreds of thousands of entries isn't bad at all, really. A lot of programming language implementations will choke on much shorter recursions than that. You're having serious programmer problems — nothing to be ashamed of!
Now if you want to do deeper recursions than your implementation will handle, you need to transform your algorithm so it is iterative and/or tail-recursive (the two are isomorphic — except that tail-recursion allows for decentralization and modularity, whereas iteration is centralized and non-modular).
To transform an algorithm from recursive to tail-recursive, which is an important skill to possess, you need to understand the state that is implicitly stored in a stack frame, i.e. those free variables in the function body that change across the recursion, and explicitly store them in a LIFO stack (a data structure that replicates your call stack, and can be implemented trivially as a linked list). Then you can pass that linked list of reified frame variables as an argument to your tail-recursive functions.
In more advanced cases where you have many tail recursive functions each with a different kind of frame, instead of simple self-recursion, you may need to define some mutually recursive data types for the reified stack frames, instead of using a list. But I believe Kosaraju's algorithm only involves self-recursive functions.
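To make the reified-frames idea concrete, here is a rough sketch of the finishing-time pass written with an explicit frame list instead of call-stack recursion. It uses simplified types (a plain Dictionary<int, int list> adjacency map), not the poster's DFSgraph1, and Enter/Exit are names I made up for the two kinds of frames:
open System.Collections.Generic

type Frame = Enter of int | Exit of int

// assumes every node appears as a key in 'graph'
let finishingTimes (graph: Dictionary<int, int list>) (nodes: int list) =
    let explored = HashSet<int>()
    let finish = Dictionary<int, int>()
    let mutable t = 0
    for root in nodes do
        let mutable frames = [ Enter root ]
        while not frames.IsEmpty do
            match frames with
            | [] -> ()
            | Enter n :: rest when explored.Contains n -> frames <- rest
            | Enter n :: rest ->
                explored.Add n |> ignore
                // children first, then the Exit frame of n, then whatever was pending
                frames <- (graph.[n] |> List.map Enter) @ (Exit n :: rest)
            | Exit n :: rest ->
                t <- t + 1
                finish.[n] <- t
                frames <- rest
    finish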
OK, so the code given above was the RIGHT code!
The problem lies with the settings of the F# compiler.
Here are some words about it from Microsoft:
http://blogs.msdn.com/b/fsharpteam/archive/2011/07/08/tail-calls-in-fsharp.aspx
Basically, be careful with the settings: with the default settings the compiler may NOT emit tail calls automatically (they are on by default in Release builds, but off in Debug builds). To turn them on in VS2015, go to the Solution Explorer, right-click the project and click on "Properties" (the last element of the list).
Then in the new window, click on "Build" and tick the "Generate tail calls" box.
You can also check whether the compiler did its job by looking at the disassembly with
ILDASM.exe
You can find the source code for the whole algorithm in my GitHub repository:
https://github.com/FaguiCurtain/Learning-Fsharp/blob/master/Algo%20Stanford/Algo%20Stanford/Kosaraju_cont.fs
From a performance point of view, I'm not very satisfied. The code runs in 36 seconds on my laptop. From the forum with other fellow MOOCers, C/C++/C# typically executes in subsecond to 5 s, Java around 10-15 s, Python around 20-30 s.
So my implementation is clearly not optimized. I am now happy to hear about tricks to make it faster! Thanks!
I'm trying to master continuation passing style (CPS) and am therefore reworking an example shown to me by Gary Short quite a while ago. I don't have his sample source code so I'm trying to rework his example from memory. Consider the following code:
let checkedDiv m n =
match n with
| 0.0 -> None
| _ -> Some(m/n)
let reciprocal r = checkedDiv 1.0 r
let resistance c1 c2 c3 =
(fun c1 -> if (reciprocal c1).IsSome then
(fun c2 -> if (reciprocal c2).IsSome then
(fun c3 -> if (reciprocal c3).IsSome then
Some((reciprocal c1).Value + (reciprocal c2).Value + (reciprocal c3).Value))));;
What I can't quite figure out is how to structure the resistance function. I came up with this earlier:
let resistance r1 r2 r3 =
if (reciprocal r1).IsSome then
if (reciprocal r2).IsSome then
if (reciprocal r3).IsSome then
Some((reciprocal r1).Value + (reciprocal r2).Value + (reciprocal r3).Value)
else
None
else
None
else
None
but, of course, that's not using CPS--not to mention the fact that it seems really hacky and there's quite a bit of repeated code which also seems like a code smell.
Can someone show me how to rewrite the resistance function in a CPS way?
A straightforward way:
let resistance_cps c1 c2 c3 =
let reciprocal_cps r k = k (checkedDiv 1.0 r)
reciprocal_cps c1 <|
function
| Some rc1 ->
reciprocal_cps c2 <|
function
| Some rc2 ->
reciprocal_cps c3 <|
function
| Some rc3 -> Some (rc1 + rc2 + rc3)
| _ -> None
| _ -> None
| _ -> None
or a bit shorter with Option.bind
let resistance_cps2 c1 c2 c3 =
let reciprocal_cps r k = k (checkedDiv 1.0 r)
reciprocal_cps c1 <|
Option.bind(fun rc1 ->
reciprocal_cps c2 <|
Option.bind(fun rc2 ->
reciprocal_cps c3 <|
Option.bind(fun rc3 -> Some (rc1 + rc2 + rc3))
)
)
This is a known task from "Programming F#" book by Chris Smith; the CPS-style solution code is given on page 244 there:
let let_with_check result restOfComputation =
match result with
| DivByZero -> DivByZero
| Success(x) -> restOfComputation x
let totalResistance r1 r2 r3 =
let_with_check (divide 1.0 r1) (fun x ->
let_with_check (divide 1.0 r2) (fun y ->
let_with_check (divide 1.0 r3) (fun z ->
divide 1.0 (x + y + z) ) ) )
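For reference, the helpers this snippet relies on are defined earlier in the book's chapter; a reconstruction along those lines (the exact names are my assumption) is:
type DivisionResult =
    | Success of float
    | DivByZero

let divide x y =
    if y = 0.0 then DivByZero
    else Success (x / y)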
Using the Maybe monad defined here
let resistance r1 r2 r3 =
maybe {
let! r1 = reciprocal r1
let! r2 = reciprocal r2
let! r3 = reciprocal r3
return r1 + r2 + r3
}
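The maybe builder itself is not shown above; a minimal version (an assumption, the linked definition may differ in details) is just Option.bind plus Some:
type MaybeBuilder() =
    member this.Bind(m, f) = Option.bind f m
    member this.Return(x) = Some x

let maybe = MaybeBuilder()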
Disclosure: this came up in FsCheck, an F# random testing framework I maintain. I have a solution, but I do not like it. Moreover, I do not understand the problem - it was merely circumvented.
A fairly standard implementation of (monadic, if we're going to use big words) sequence is:
let sequence l =
let k m m' = gen { let! x = m
let! xs = m'
return (x::xs) }
List.foldBack k l (gen { return [] })
Where gen can be replaced by a computation builder of choice. Unfortunately, that implementation consumes stack space, and so eventually stack overflows if the list is long enough. The question is: why? I know in principle foldBack is not tail recursive, but the clever bunnies of the F# team have circumvented that in the foldBack implementation. Is there a problem in the computation builder implementation?
If I change the implementation to the below, everything is fine:
let sequence l =
let rec go gs acc size r0 =
match gs with
| [] -> List.rev acc
| (Gen g)::gs' ->
let r1,r2 = split r0
let y = g size r1
go gs' (y::acc) size r2
Gen(fun n r -> go l [] n r)
For completeness, the Gen type and computation builder can be found in the FsCheck source
Building on Tomas's answer, let's define two modules:
module Kurt =
type Gen<'a> = Gen of (int -> 'a)
let unit x = Gen (fun _ -> x)
let bind k (Gen m) =
Gen (fun n ->
let (Gen m') = k (m n)
m' n)
type GenBuilder() =
member x.Return(v) = unit v
member x.Bind(v,f) = bind f v
let gen = GenBuilder()
module Tomas =
type Gen<'a> = Gen of (int -> ('a -> unit) -> unit)
let unit x = Gen (fun _ f -> f x)
let bind k (Gen m) =
Gen (fun n f ->
m n (fun r ->
let (Gen m') = k r
m' n f))
type GenBuilder() =
member x.Return v = unit v
member x.Bind(v,f) = bind f v
let gen = GenBuilder()
To simplify things a bit, let's rewrite your original sequence function as
let rec sequence = function
| [] -> gen { return [] }
| m::ms -> gen {
let! x = m
let! xs = sequence ms
return x::xs }
Now, sequence [for i in 1 .. 100000 -> unit i] will run to completion regardless of whether sequence is defined in terms of Kurt.gen or Tomas.gen. The issue is not that sequence causes a stack overflow when using your definitions, it's that the function returned from the call to sequence causes a stack overflow when it is called.
To see why this is so, let's expand the definition of sequence in terms of the underlying monadic operations:
let rec sequence = function
| [] -> unit []
| m::ms ->
bind (fun x -> bind (fun xs -> unit (x::xs)) (sequence ms)) m
Inlining the Kurt.unit and Kurt.bind values and simplifying like crazy, we get
let rec sequence = function
| [] -> Kurt.Gen(fun _ -> [])
| (Kurt.Gen m)::ms ->
Kurt.Gen(fun n ->
let (Kurt.Gen ms') = sequence ms
(m n)::(ms' n))
Now it's hopefully clear why calling let (Kurt.Gen f) = sequence [for i in 1 .. 1000000 -> unit i] in f 0 overflows the stack: f requires a non-tail-recursive call to sequence and evaluation of the resulting function, so there will be one stack frame for each recursive call.
Inlining Tomas.unit and Tomas.bind into the definition of sequence instead, we get the following simplified version:
let rec sequence = function
| [] -> Tomas.Gen (fun _ f -> f [])
| (Tomas.Gen m)::ms ->
Tomas.Gen(fun n f ->
m n (fun r ->
let (Tomas.Gen ms') = sequence ms
ms' n (fun rs -> f (r::rs))))
Reasoning about this variant is tricky. You can empirically verify that it won't blow the stack for some arbitrarily large inputs (as Tomas shows in his answer), and you can step through the evaluation to convince yourself of this fact. However, the stack consumption depends on the Gen instances in the list that's passed in, and it is possible to blow the stack for inputs that aren't themselves tail recursive:
// ok
let (Tomas.Gen f) = sequence [for i in 1 .. 1000000 -> unit i]
f 0 (fun list -> printfn "%i" list.Length)
// not ok...
let (Tomas.Gen f) = sequence [for i in 1 .. 1000000 -> Gen(fun _ f -> f i; printfn "%i" i)]
f 0 (fun list -> printfn "%i" list.Length)
You're correct - the reason why you're getting a stack overflow is that the bind operation of the monad needs to be tail-recursive (because it is used to aggregate values during folding).
The monad used in FsCheck is essentially a state monad (it keeps the current generator and some number). I simplified it a bit and got something like:
type Gen<'a> = Gen of (int -> 'a)
let unit x = Gen (fun n -> x)
let bind k (Gen m) =
Gen (fun n ->
let (Gen m') = k (m n)
m' n)
Here, the bind function is not tail-recursive because it calls k and then does some more work. You can change the monad to be a continuation monad. It is implemented as a function that takes the state and a continuation - a function that is called with the result as an argument. For this monad, you can make bind tail recursive:
type Gen<'a> = Gen of (int -> ('a -> unit) -> unit)
let unit x = Gen (fun n f -> f x)
let bind k (Gen m) =
Gen (fun n f ->
m n (fun r ->
let (Gen m') = k r
m' n f))
The following example will not stack overflow (and it did with the original implementation):
let sequence l =
let k m m' =
m |> bind (fun x ->
m' |> bind (fun xs ->
unit (x::xs)))
List.foldBack k l (unit [])
let (Gen f) = sequence [ for i in 1 .. 100000 -> unit i ]
f 0 (fun list -> printfn "%d" list.Length)
Is it possible to combine memoization and tail-recursion somehow? I'm learning F# at the moment and understand both concepts but can't seem to combine them.
Suppose I have the following memoize function (from Real-World Functional Programming):
let memoize f = let cache = new Dictionary<_, _>()
(fun x -> match cache.TryGetValue(x) with
| true, y -> y
| _ -> let v = f(x)
cache.Add(x, v)
v)
and the following factorial function:
let rec factorial(x) = if (x = 0) then 1 else x * factorial(x - 1)
Memoizing factorial isn't too difficult and making it tail-recursive isn't either:
let rec memoizedFactorial =
memoize (fun x -> if (x = 0) then 1 else x * memoizedFactorial(x - 1))
let tailRecursiveFactorial(x) =
let rec factorialUtil(x, res) = if (x = 0)
then res
else let newRes = x * res
factorialUtil(x - 1, newRes)
factorialUtil(x, 1)
But can you combine memoization and tail-recursion? I made some attempts but can't seem to get it working. Or is this simply not possible?
As always, continuations yield an elegant tailcall solution:
open System.Collections.Generic
let cache = Dictionary<_,_>() // TODO move inside
let memoizedTRFactorial =
let rec fac n k = // must make tailcalls to k
match cache.TryGetValue(n) with
| true, r -> k r
| _ ->
if n=0 then
k 1
else
fac (n-1) (fun r1 ->
printfn "multiplying by %d" n //***
let r = r1 * n
cache.Add(n,r)
k r)
fun n -> fac n id
printfn "---"
let r = memoizedTRFactorial 4
printfn "%d" r
for KeyValue(k,v) in cache do
printfn "%d: %d" k v
printfn "---"
let r2 = memoizedTRFactorial 5
printfn "%d" r2
printfn "---"
// comment out *** line, then run this
//let r3 = memoizedTRFactorial 100000
//printfn "%d" r3
There are two kinds of tests. First, this demos that calling F(4) caches F(4), F(3), F(2), F(1) as you would like.
Then, comment out the *** printf and uncomment the final test (and compile in Release mode) to show that it does not StackOverflow (it uses tailcalls correctly).
Perhaps I'll generalize out 'memoize' and demonstrate it on 'fib' next...
EDIT
Ok, here's the next step, I think, decoupling memoization from factorial:
open System.Collections.Generic
let cache = Dictionary<_,_>() // TODO move inside
let memoize fGuts n =
let rec newFunc n k = // must make tailcalls to k
match cache.TryGetValue(n) with
| true, r -> k r
| _ ->
fGuts n (fun r ->
cache.Add(n,r)
k r) newFunc
newFunc n id
let TRFactorialGuts n k memoGuts =
if n=0 then
k 1
else
memoGuts (n-1) (fun r1 ->
printfn "multiplying by %d" n //***
let r = r1 * n
k r)
let memoizedTRFactorial = memoize TRFactorialGuts
printfn "---"
let r = memoizedTRFactorial 4
printfn "%d" r
for KeyValue(k,v) in cache do
printfn "%d: %d" k v
printfn "---"
let r2 = memoizedTRFactorial 5
printfn "%d" r2
printfn "---"
// comment out *** line, then run this
//let r3 = memoizedTRFactorial 100000
//printfn "%d" r3
EDIT
Ok, here's a fully generalized version that seems to work.
open System.Collections.Generic
let memoize fGuts =
let cache = Dictionary<_,_>()
let rec newFunc n k = // must make tailcalls to k
match cache.TryGetValue(n) with
| true, r -> k r
| _ ->
fGuts n (fun r ->
cache.Add(n,r)
k r) newFunc
cache, (fun n -> newFunc n id)
let TRFactorialGuts n k memoGuts =
if n=0 then
k 1
else
memoGuts (n-1) (fun r1 ->
printfn "multiplying by %d" n //***
let r = r1 * n
k r)
let facCache,memoizedTRFactorial = memoize TRFactorialGuts
printfn "---"
let r = memoizedTRFactorial 4
printfn "%d" r
for KeyValue(k,v) in facCache do
printfn "%d: %d" k v
printfn "---"
let r2 = memoizedTRFactorial 5
printfn "%d" r2
printfn "---"
// comment out *** line, then run this
//let r3 = memoizedTRFactorial 100000
//printfn "%d" r3
let TRFibGuts n k memoGuts =
if n=0 || n=1 then
k 1
else
memoGuts (n-1) (fun r1 ->
memoGuts (n-2) (fun r2 ->
printfn "adding %d+%d" r1 r2 //%%%
let r = r1+r2
k r))
let fibCache, memoizedTRFib = memoize TRFibGuts
printfn "---"
let r5 = memoizedTRFib 4
printfn "%d" r5
for KeyValue(k,v) in fibCache do
printfn "%d: %d" k v
printfn "---"
let r6 = memoizedTRFib 5
printfn "%d" r6
printfn "---"
// comment out %%% line, then run this
//let r7 = memoizedTRFib 100000
//printfn "%d" r7
The predicament of memoizing tail-recursive functions is, of course, that when a tail-recursive function
let f x =
......
f x1
calls itself, it is not allowed to do anything with the result of the recursive call, including putting it into the cache. Tricky; so what can we do?
The critical insight here is that since the recursive function is not allowed to do anything with the result of a recursive call, the result for all arguments to the recursive calls will be the same! Therefore if the recursion call trace is this
f x0 -> f x1 -> f x2 -> f x3 -> ... -> f xN -> res
then for all x in x0,x1,...,xN the result of f x will be the same, namely res. So the last invocation of a recursive function, the non-recursive call, knows the results for all the previous values - it is in a position to cache them. The only thing you need to do is to pass a list of visited values to it. Here is what it might look like for factorial:
let cache = Dictionary<_,_>()
let rec fact0 l ((n,res) as arg) =
let commitToCache r =
l |> List.iter (fun a -> cache.Add(a,r))
match cache.TryGetValue(arg) with
| true, cachedResult -> commitToCache cachedResult; cachedResult
| false, _ ->
if n = 1 then
commitToCache res
cache.Add(arg, res)
res
else
fact0 (arg::l) (n-1, n*res)
let fact n = fact0 [] (n,1)
But wait! Look - the l parameter of fact0 contains all the arguments to recursive calls to fact0 - just like the stack would in a non-tail-recursive version! That is exactly right. Any non-tail-recursive algorithm can be converted to a tail-recursive one by moving the "list of stack frames" from the stack to the heap and converting the "postprocessing" of the recursive call result into a walk over that data structure.
Pragmatic note: The factorial example above illustrates a general technique. It is quite useless as is - for the factorial function it is quite enough to cache the top-level fact n result, because the calculation of fact n for a particular n only hits a unique series of (n,res) pairs of arguments to fact0 - if (n,1) is not cached yet, then none of the pairs fact0 is going to be called on are.
Note that in this example, when we went from the non-tail-recursive factorial to a tail-recursive factorial, we exploited the fact that multiplication is associative and commutative - the tail-recursive factorial executes a different set of multiplications than a non-tail-recursive one.
In fact, a general technique exists for going from a non-tail-recursive to a tail-recursive algorithm, and it yields an exactly equivalent algorithm. This technique is called "continuation-passing transformation". Going that route, you can take a non-tail-recursive memoizing factorial and get a tail-recursive memoizing factorial by a pretty much mechanical transformation. See Brian's answer for an exposition of this method.
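To see the "walk over the reified frames" step in isolation, here is a tiny example of my own (plain factorial, no memoization): the pending multipliers go into a list on the heap, and a single fold replaces the postprocessing that a non-tail-recursive version would do while unwinding the stack.
let factorialViaFrames n =
    let rec go frames n =
        if n <= 1 then List.fold ( * ) 1 frames   // walk the reified "stack frames"
        else go (n :: frames) (n - 1)             // push a frame and make a tail call
    go [] n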
I'm not sure if there's a simpler way to do this, but one approach would be to create a memoizing y-combinator:
let memoY f =
let cache = Dictionary<_,_>()
let rec fn x =
match cache.TryGetValue(x) with
| true,y -> y
| _ -> let v = f fn x
cache.Add(x,v)
v
fn
Then, you can use this combinator in lieu of "let rec", with the first argument representing the function to call recursively:
let tailRecFact =
let factHelper fact (x, res) =
printfn "%i,%i" x res
if x = 0 then res
else fact (x-1, x*res)
let memoized = memoY factHelper
fun x -> memoized (x,1)
EDIT
As Mitya pointed out, memoY doesn't preserve the tail recursive properties of the memoee. Here's a revised combinator which uses exceptions and mutable state to memoize any recursive function without overflowing the stack (even if the original function is not itself tail recursive!):
let memoY f =
let cache = Dictionary<_,_>()
fun x ->
let l = ResizeArray([x])
while l.Count <> 0 do
let v = l.[l.Count - 1]
if cache.ContainsKey(v) then l.RemoveAt(l.Count - 1)
else
try
cache.[v] <- f (fun x ->
if cache.ContainsKey(x) then cache.[x]
else
l.Add(x)
failwith "Need to recurse") v
with _ -> ()
cache.[x]
Unfortunately, the machinery which is inserted into each recursive call is somewhat heavy, so performance on un-memoized inputs requiring deep recursion can be a bit slow. However, compared to some other solutions, this has the benefit that it requires fairly minimal changes to the natural expression of recursive functions:
let fib = memoY (fun fib n ->
printfn "%i" n;
if n <= 1 then n
else (fib (n-1)) + (fib (n-2)))
let _ = fib 5000
EDIT
I'll expand a bit on how this compares to other solutions. This technique takes advantage of the fact that exceptions provide a side channel: a function of type 'a -> 'b doesn't actually need to return a value of type 'b, but can instead exit via an exception. We wouldn't need to use exceptions if the return type explicitly contained an additional value indicating failure. Of course, we could use the 'b option as the return type of the function for this purpose. This would lead to the following memoizing combinator:
let memoO f =
let cache = Dictionary<_,_>()
fun x ->
let l = ResizeArray([x])
while l.Count <> 0 do
let v = l.[l.Count - 1]
if cache.ContainsKey v then l.RemoveAt(l.Count - 1)
else
match f(fun x -> if cache.ContainsKey x then Some(cache.[x]) else l.Add(x); None) v with
| Some(r) -> cache.[v] <- r;
| None -> ()
cache.[x]
Previously, our memoization process looked like:
fun fib n ->
printfn "%i" n;
if n <= 1 then n
else (fib (n-1)) + (fib (n-2))
|> memoY
Now, we need to incorporate the fact that fib should return an int option instead of an int. Given a suitable workflow for option types, this could be written as follows:
fun fib n -> option {
printfn "%i" n
if n <= 1 then return n
else
let! x = fib (n-1)
let! y = fib (n-2)
return x + y
} |> memoO
However, if we're willing to change the return type of the first parameter (from int to int option in this case), we may as well go all the way and just use continuations in the return type instead, as in Brian's solution. Here's a variation on his definitions:
let memoC f =
let cache = Dictionary<_,_>()
let rec fn n k =
match cache.TryGetValue(n) with
| true, r -> k r
| _ ->
f fn n (fun r ->
cache.Add(n,r)
k r)
fun n -> fn n id
And again, if we have a suitable computation expression for building CPS functions, we can define our recursive function like this:
fun fib n -> cps {
printfn "%i" n
if n <= 1 then return n
else
let! x = fib (n-1)
let! y = fib (n-2)
return x + y
} |> memoC
This is exactly the same as what Brian has done, but I find the syntax here is easier to follow. To make this work, all we need are the following two definitions:
type CpsBuilder() =
member this.Return x k = k x
member this.Bind(m,f) k = m (fun a -> f a k)
let cps = CpsBuilder()
I wrote a test to visualize the memoization. Each dot is a recursive call.
......720 // factorial 6
......720 // factorial 6
.....120 // factorial 5
......720 // memoizedFactorial 6
720 // memoizedFactorial 6
120 // memoizedFactorial 5
......720 // tailRecFact 6
720 // tailRecFact 6
.....120 // tailRecFact 5
......720 // tailRecursiveMemoizedFactorial 6
720 // tailRecursiveMemoizedFactorial 6
.....120 // tailRecursiveMemoizedFactorial 5
kvb's solution returns the same results as straight memoization, like this function.
let tailRecursiveMemoizedFactorial =
memoize
(fun x ->
let rec factorialUtil x res =
if x = 0 then
res
else
printf "."
let newRes = x * res
factorialUtil (x - 1) newRes
factorialUtil x 1
)
Test source code.
open System.Collections.Generic
let memoize f =
let cache = new Dictionary<_, _>()
(fun x ->
match cache.TryGetValue(x) with
| true, y -> y
| _ ->
let v = f(x)
cache.Add(x, v)
v)
let rec factorial(x) =
if (x = 0) then
1
else
printf "."
x * factorial(x - 1)
let rec memoizedFactorial =
memoize (
fun x ->
if (x = 0) then
1
else
printf "."
x * memoizedFactorial(x - 1))
let memoY f =
let cache = Dictionary<_,_>()
let rec fn x =
match cache.TryGetValue(x) with
| true,y -> y
| _ -> let v = f fn x
cache.Add(x,v)
v
fn
let tailRecFact =
let factHelper fact (x, res) =
if x = 0 then
res
else
printf "."
fact (x-1, x*res)
let memoized = memoY factHelper
fun x -> memoized (x,1)
let tailRecursiveMemoizedFactorial =
memoize
(fun x ->
let rec factorialUtil x res =
if x = 0 then
res
else
printf "."
let newRes = x * res
factorialUtil (x - 1) newRes
factorialUtil x 1
)
factorial 6 |> printfn "%A"
factorial 6 |> printfn "%A"
factorial 5 |> printfn "%A\n"
memoizedFactorial 6 |> printfn "%A"
memoizedFactorial 6 |> printfn "%A"
memoizedFactorial 5 |> printfn "%A\n"
tailRecFact 6 |> printfn "%A"
tailRecFact 6 |> printfn "%A"
tailRecFact 5 |> printfn "%A\n"
tailRecursiveMemoizedFactorial 6 |> printfn "%A"
tailRecursiveMemoizedFactorial 6 |> printfn "%A"
tailRecursiveMemoizedFactorial 5 |> printfn "%A\n"
System.Console.ReadLine() |> ignore
That should work if the mutual tail recursion through y does not create stack frames:
let rec y f x = f (y f) x
let memoize (d:System.Collections.Generic.Dictionary<_,_>) f n =
if d.ContainsKey n then d.[n]
else d.Add(n, f n);d.[n]
let rec factorialucps factorial' n cont =
if n = 0I then cont(1I) else factorial' (n-1I) (fun k -> cont (n*k))
let factorialdpcps =
let d = System.Collections.Generic.Dictionary<_, _>()
fun n -> y (factorialucps >> fun f n -> memoize d f n ) n id
factorialdpcps 15I //1307674368000