I'm implementing the BM3D image-denoising algorithm. It already produces the desired result (a denoised grayscale image), but it is far too slow, even on a 436 × 436 grayscale image.
I have already tried to speed up the work I do with arrays and lists, but didn't get much improvement.
val img = imread("files/image.png", 0)
val img3 = Mat(img.rows(), img.cols(), img.type())
val listaBlocos = mutableListOf(Pair(0.0, Pair(0, 0)))
val tamanhoBloco = 3   // Block size
val tamanhoJanela = 9  // Search window size
val mediaPorBloco = DoubleArray(16)
var sum = 0.0
listaBlocos.clear()
val stats_file = File("files/tempos436x436.txt")
val test = 10
for (x in 0 until test) {
    val timeelapsed = measureTimeMillis {
        for (col in 20..img.width() - 20) {
            for (row in 20..img.height() - 20) {
                // Reference block centred on the current pixel.
                val block1 = generateBlock(img, row, col, tamanhoBloco)
                // Compare it against every block in the search window.
                for (c in -tamanhoJanela..tamanhoJanela) {
                    for (l in -tamanhoJanela..tamanhoJanela) {
                        val block2 = generateBlock(img, row + l, col + c, tamanhoBloco)
                        val d = distBlock(block1, block2)
                        val par = Pair(d, Pair(row + l, col + c))
                        listaBlocos.add(par)
                    }
                }
                // Keep the 16 most similar blocks.
                val listaBlocosOrdenada = listaBlocos.sortedWith(compareBy { it.first })
                listaBlocos.clear()
                for (k in 0..15) {
                    sum = 0.0
                    val c2 = listaBlocosOrdenada[k].second.second
                    val l2 = listaBlocosOrdenada[k].second.first
                    for (c in 0..tamanhoBloco - 1) {
                        for (l in 0..tamanhoBloco - 1) {
                            sum += img.get(l2 - l, c2 - c)[0]
                        }
                    }
                    mediaPorBloco[k] = sum / 4
                }
                // Write the aggregated estimate for this pixel.
                val v = mediaPorBloco.average()
                img3.put(row, col, v)
            }
        }
    }
    imwrite("files/resultado.png", img3)
    stats_file.appendText("teste$x 100X200 $timeelapsed\n")
}
The actual denoising result is good, but it takes about 15 minutes to process a 436 × 436 image. I'm currently running this in a virtual machine with Ubuntu, 4 cores and 4 GB of RAM.
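For a rough sense of where the time goes, the loop bounds above (436 × 436 image, a 20-pixel border, a search window of 9 pixels either side) imply, per pass, roughly

\[
(436 - 2\cdot 20 + 1)^2 \times (2\cdot 9 + 1)^2 = 397^2 \times 19^2 \approx 1.6\times 10^{5} \times 361 \approx 5.7\times 10^{7}
\]

calls to generateBlock/distBlock, each presumably allocating a block and a Pair, plus a full sort of a 361-element list per pixel just to keep the best 16 entries. That allocation and sorting volume, rather than the arithmetic itself, is the most likely explanation for the 15 minutes, before any use of the 4 cores is even considered.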
I am trying to create my own hashing framework/library, but I've stumbled across an issue. When I calculate the SHA256 hash of an empty string, the hash is calculated successfully, but when I calculate it for anything else, it fails. Can someone help me figure out why?
The empty-string hash below matches the value given on Wikipedia and the one produced by online tools and Python.
let h = SHA256(message: Data("".utf8))
let d = h.digest()
// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
print(d)
But 'Hello world' does not
let h = SHA256(message: Data("Hello world".utf8))
let d = h.digest()
// ce9f4c08f0688d09b8061ed6692c1d5af2516c8682fad2d9a5d72f96ba787a80
print(d)
// Expected:
// 64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c
I hope someone can help me. SHA256 implementation below:
/*
 First 32 bits of the fractional parts of the
 square roots of the first 8 primes 2..19.
 */
fileprivate let kSHA256H0: UInt32 = 0x6a09e667
fileprivate let kSHA256H1: UInt32 = 0xbb67ae85
fileprivate let kSHA256H2: UInt32 = 0x3c6ef372
fileprivate let kSHA256H3: UInt32 = 0xa54ff53a
fileprivate let kSHA256H4: UInt32 = 0x510e527f
fileprivate let kSHA256H5: UInt32 = 0x9b05688c
fileprivate let kSHA256H6: UInt32 = 0x1f83d9ab
fileprivate let kSHA256H7: UInt32 = 0x5be0cd19

/*
 First 32 bits of the fractional parts of the
 cube roots of the first 64 primes 2..311.
 */
fileprivate let kSHA256K: [UInt32] = [
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
]

/// Shift the value of x n amount to the right.
/// - Parameters:
///   - x: The value to shift.
///   - n: The amount to shift by.
/// - Returns: The shifted value.
fileprivate func shiftRight(_ x: UInt32, _ n: UInt32) -> UInt32 { x >> n }

/// Rotate the value of x n amount of times.
/// - Parameters:
///   - x: The value to rotate.
///   - y: The amount to rotate by.
/// - Returns: The rotated value.
fileprivate func rotateRight(_ x: UInt32, _ y: UInt32) -> UInt32 { (x >> (y & 31)) | (x << (32 - (y & 31))) }

/// Split data into chunks of specified size.
/// - Note: This function will not pad or append data
///   to make sure all the chunks are equal in size.
/// - Parameters:
///   - data: The data to split.
///   - size: The size of a chunk.
/// - Returns: An array containing chunks of specified size (when able).
fileprivate func chunk(_ data: Data, toSize size: Int) -> [Data] {
    stride(from: 0, to: data.count, by: size).map {
        data.subdata(in: $0 ..< Swift.min($0 + size, data.count))
    }
}

public class SHA256 {
    /// The pre-processed data.
    fileprivate let message: Data

    fileprivate var hash = [
        kSHA256H0, kSHA256H1, kSHA256H2, kSHA256H3,
        kSHA256H4, kSHA256H5, kSHA256H6, kSHA256H7
    ]

    public init(message: Data) {
        self.message = Self.preProcess(message: message)
    }

    fileprivate static func preProcess(message: Data) -> Data {
        let L = message.count * 8 // Original message length in bits.
        var K = 0 // Required padding bits.
        while (L + 1 + K + 64) % 512 != 0 {
            K += 1
        }
        var padding = Data(repeating: 0, count: K / 8)
        padding.insert(0x80, at: 0) // Insert 1000 0000 into the padding.
        var length = UInt64(L).bigEndian
        return message + padding + Data(bytes: &length, count: 8)
    }

    public func digest() -> Data {
        let chunks = chunk(message, toSize: 64)
        for chunk in chunks {
            var w = [UInt32](repeating: 0, count: 64) // 64-entry message schedule array of 32-bit words.
            // Copy the chunk into first 16 words w[0..15] of the schedule array.
            for i in 0 ..< 16 {
                let sub = chunk.subdata(in: i ..< i + 4)
                w[i] = sub.withUnsafeBytes { $0.load(as: UInt32.self) }.bigEndian
            }
            // Extend the first 16 words into the remaining 48 words w[16..63] of the schedule array.
            for i in 16 ..< 64 {
                let s0 = rotateRight(w[i - 15], 7) ^ rotateRight(w[i - 15], 18) ^ shiftRight(w[i - 15], 3)
                let s1 = rotateRight(w[i - 2], 17) ^ rotateRight(w[i - 2], 19) ^ shiftRight(w[i - 2], 10)
                w[i] = s1 &+ w[i - 7] &+ s0 &+ w[i - 16]
            }
            // Create some working variables.
            var a = hash[0]
            var b = hash[1]
            var c = hash[2]
            var d = hash[3]
            var e = hash[4]
            var f = hash[5]
            var g = hash[6]
            var h = hash[7]
            // Compress function main loop.
            for i in 0 ..< 64 {
                let S1 = rotateRight(e, 6) ^ rotateRight(e, 11) ^ rotateRight(e, 25)
                let ch = (e & f) ^ (~e & g)
                let T1 = h &+ S1 &+ ch &+ kSHA256K[i] &+ w[i]
                let S0 = rotateRight(a, 2) ^ rotateRight(a, 13) ^ rotateRight(a, 22)
                let maj = (a & b) ^ (a & c) ^ (b & c)
                let T2 = S0 &+ maj
                h = g
                g = f
                f = e
                e = d &+ T1
                d = c
                c = b
                b = a
                a = T1 &+ T2
            }
            hash[0] &+= a
            hash[1] &+= b
            hash[2] &+= c
            hash[3] &+= d
            hash[4] &+= e
            hash[5] &+= f
            hash[6] &+= g
            hash[7] &+= h
        }
        return hash.map {
            var num = $0.bigEndian
            return Data(bytes: &num, count: 4)
        }.reduce(Data(), +)
    }
}
It turns out I was creating the wrong subdata range to construct the UInt32s for the message schedule array (the first couple of lines in the .digest() function).
The old one was
let sub = chunk.subdata(in: i ..< i + 4)
The new one is
let sub = chunk.subdata(in: i * 4 ..< (i * 4) + 4)
This resolves the issue. The old range advances by only one byte per word, so for i = 1 it reads bytes 1..<5 instead of 4..<8, and every schedule word after w[0] is wrong. The empty message only appeared to work because its padded block is 0x80 followed by 63 zero bytes, so the overlapping reads happen to produce the same schedule words.
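If it helps to catch this kind of regression earlier, one option is to compare the output against Apple's CryptoKit (macOS 10.15+ / iOS 13+). This is only a sketch, assuming the SHA256 class above is in the same module; the hex helper is an addition here, not part of the original code:

import Foundation
import CryptoKit

// Hex-encode a sequence of bytes for easy comparison.
func hex<S: Sequence>(_ bytes: S) -> String where S.Element == UInt8 {
    bytes.map { String(format: "%02x", $0) }.joined()
}

let message = Data("Hello world".utf8)
let mine = hex(SHA256(message: message).digest())          // the class above; it shadows CryptoKit's SHA256
let reference = hex(CryptoKit.SHA256.hash(data: message))  // CryptoKit's built-in SHA-256
print(mine == reference) // true once the subdata fix is applied
// Both should be 64ec88ca00b268e5ba1a35678a1b5316d212f4f366b2477232534a8aeca37f3c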
I am writing this code for linear regression, using gradient descent to minimize the RSS. The cost explodes to infinity within 12 iterations, which is not supposed to happen. Maybe I have used the wrong gradient function for the RSS (see the function grad())?
import numpy as np

NumberObservations = 100
minVal = 1
maxVal = 20
X = np.random.uniform(minVal, maxVal, (NumberObservations, 1))
e = np.random.normal(0, 1, (NumberObservations, 1))
Y = 10 + 5*X + e
B = np.array([[0], [0]])
sum_y = sum(Y)
sum_x = sum(X)
sum_xy = sum(np.multiply(X, Y))
sum_x2 = sum(X*X)
alpha = 0.00001
iterations = 15

def cost_fun(X, Y, B):
    b0 = B[0]
    b1 = B[1]
    s = (Y - (b0 + (b1*X)))**2
    rss = sum(s)
    return rss

def grad(X, Y, B):
    print("B = " + str(B))
    b0 = B[0]
    b1 = B[1]
    g0 = -2*(Y - b0 - (b1*X))
    g1 = -2*((X*Y) - (b0*X) - (b1*X**2))
    grad = np.concatenate((g0, g1), axis=1)
    return grad

def gradient_descent(X, Y, B, alpha, iterations):
    cost_history = [0] * iterations
    m = len(Y)
    x0 = np.array(np.ones(m))
    x0 = x0.reshape((100, 1))
    X1 = np.concatenate((x0, X), axis=1)
    for iteration in range(iterations):
        h = np.dot(X1, B)
        h = h.reshape((100, 1))
        loss = h - Y
        g = grad(X, Y, B)
        gradient = (np.dot(g.T, loss) / m)
        B = B - alpha * gradient
        cost = cost_fun(X, Y, B)
        cost_history[iteration] = cost
        print("Iteration %d | Cost: %f" % (iteration, cost))
        print("-----------------------------------------------------------------------")
    return B, cost_history

newB, cost_history = gradient_descent(X, Y, B, alpha, iterations)
# New Values of B
print(newB)
Please help.
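For reference, differentiating the cost computed in cost_fun (the residual sum of squares) with respect to b0 and b1 gives a gradient that is already summed over the observations:

\[
\frac{\partial\,\mathrm{RSS}}{\partial b_0} = -2\sum_{i=1}^{n}\bigl(y_i - b_0 - b_1 x_i\bigr),
\qquad
\frac{\partial\,\mathrm{RSS}}{\partial b_1} = -2\sum_{i=1}^{n} x_i\bigl(y_i - b_0 - b_1 x_i\bigr)
\]

Note that grad() above returns these terms per observation (an n × 2 matrix, with no sum), and gradient_descent then multiplies g.T by loss, which is another copy of the residual, so the quantity used in the update is a sum of squared residuals rather than this gradient. That mismatch alone would be enough to make the cost blow up within a few iterations.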
I am working on a calculator written in pure Lua, but I need help with converting the output decimals into fractions.
This solution uses continued fractions to exactly restore fractions with denominators up to 10^7 (a worked expansion of one example follows the sample calls below).
local function to_frac(num)
    local W = math.floor(num)
    local F = num - W
    local pn, n, N = 0, 1
    local pd, d, D = 1, 0
    local x, err, q, Q
    repeat
        x = x and 1 / (x - q) or F
        q, Q = math.floor(x), math.floor(x + 0.5)
        pn, n, N = n, q*n + pn, Q*n + pn
        pd, d, D = d, q*d + pd, Q*d + pd
        err = F - N/D
    until math.abs(err) < 1e-15
    return N + D*W, D, err
end

local function print_frac(numer, denom)
    print(string.format("%.14g/%d = %d/%d + %g", numer, denom, to_frac(numer/denom)))
end
print_frac(1, 4) --> 1/4 = 1/4 + 0
print_frac(12, 8) --> 12/8 = 3/2 + 0
print_frac(4, 2) --> 4/2 = 2/1 + 0
print_frac(16, 11) --> 16/11 = 16/11 + 5.55112e-17
print_frac(1, 13) --> 1/13 = 1/13 + 0
print_frac(math.sqrt(3), 1) --> 1.7320508075689/1 = 50843527/29354524 + -4.44089e-16
print_frac(math.pi, 1) --> 3.1415926535898/1 = 80143857/25510582 + 4.44089e-16
print_frac(0, 3) --> 0/3 = 0/1 + 0
print_frac(-10, 3) --> -10/3 = -10/3 + -1.11022e-16
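For intuition, the repeat loop is unwinding the continued-fraction expansion of its input; for the 16/11 example above:

\[
\frac{16}{11} = 1 + \cfrac{1}{2 + \cfrac{1}{5}}
\]

Each pass extracts one more continued-fraction quotient of the fractional part and updates the running numerator/denominator pair, stopping once the reconstruction error drops below 1e-15; the integer part W is added back in the return statement.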
This is not possible with plain numbers; you would need a class that stores fractions for that.
You can achieve an approximate solution, though. It works nicely for values that can be expressed as a fraction and blows up for everything else.
local function gcd(a, b)
    while a ~= 0 do
        a, b = b%a, a;
    end
    return b;
end

local function round(a)
    return math.floor(a+.5)
end

function to_frac(num)
    local integer = math.floor(num)
    local decimal = num - integer
    if decimal == 0 then
        return num, 1.0, 0.0
    end
    local prec = 1000000000
    local gcd_ = gcd(round(decimal*prec), prec)
    local numer = math.floor((integer*prec + round(decimal*prec))/gcd_)
    local denom = math.floor(prec/gcd_)
    local err = numer/denom - num
    return numer, denom, err
end

function print_frac(numer, denom)
    print(string.format("%d/%d = %d/%d + %g", numer, denom, to_frac(numer/denom)))
end

print_frac(1, 4)
print_frac(12, 8)
print_frac(4, 2)
print_frac(16, 11)
print_frac(1, 13)
Output:
1/4 = 1/4 + 0
12/8 = 3/2 + 0
4/2 = 2/1 + 0
16/11 = 290909091/200000000 + 4.54546e-10
1/13 = 76923077/1000000000 + 7.69231e-11
I'm writing answers for project Euler Questions in this repo
but having some performance issues in my solution
Question 2:
Each new term in the Fibonacci sequence is generated by adding the previous two terms.
By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
My Solution is
func solution2()
{
    func fibonacci(number: Int) -> (Int)
    {
        if number <= 1
        {
            return number
        }
        else
        {
            return fibonacci(number - 1) + fibonacci(number - 2)
        }
    }
    var sum = 0
    print("calculating...")
    for index in 2..<50
    {
        print(index)
        if (fibonacci(index) % 2 == 0)
        {
            sum += fibonacci(index)
        }
    }
    print(sum)
}
My question is: why does it get super slow after iteration 42? I want to run it up to 4,000,000 as the question says. Any help?
solution 2
func solution2_fast()
{
    var phiOne : Double = (1.0 + sqrt(5.0)) / 2.0
    var phiTwo : Double = (1.0 - sqrt(5.0)) / 2.0
    func findFibonacciNumber (nthNumber : Double) -> Int64
    {
        let nthNumber : Double = (pow(phiOne, nthNumber) - (pow(phiTwo, nthNumber))) / sqrt(5.0)
        return Int64(nthNumber)
    }
    var sum : Int64 = 0
    print("calculating...")
    for index in 2..<4000000
    {
        print(index)
        let f = findFibonacciNumber(Double(index))
        if (f % 2 == 0)
        {
            sum += f
        }
    }
    print(sum)
}
The most important thing about PE questions is to think about what it is asking.
This is not asking you to produce all Fibonacci numbers F(n) less than 4000000. It is asking for the sum of all even F(n) less than 4000000.
Think about the sum of all F(n) where F(n) < 10.
1 + 2 + 3 + 5 + 8
I could do this by calculating F(1), then F(2), then F(3), and so on... and then checking they are less than 10 before adding them up.
Or I could store two variables...
F1 = 1
F2 = 2
And a total...
Total = 3
Now I can turn this into a while loop and lose the recursion altogether. In fact, the most complex thing I'm doing is adding two numbers together...
I came up with this...
func sumEvenFibonacci(lessThan limit: Int) -> Int {
    // store the first two Fibonacci numbers
    var n1 = 1
    var n2 = 2
    // and a cumulative total
    var total = 0
    // repeat until you hit the limit
    while n2 < limit {
        // if the current Fibonacci is even then add to total
        if n2 % 2 == 0 {
            total += n2
        }
        // move the stored Fibonacci numbers up by one.
        let temp = n2
        n2 = n2 + n1
        n1 = temp
    }
    return total
}
It runs in a fraction of a second.
sumEvenFibonacci(lessThan: 4000000)
Finds the correct answer.
In fact this... sumEvenFibonacci(lessThan: 1000000000000000000) runs in about half a second.
The second solution seems to be faster, although an Int64 will not be sufficient to store the result. The sum of Fibonacci numbers from 2..91 is 7,527,100,471,027,205,936, but the largest number you can store in an Int64 is 9,223,372,036,854,775,807. For this you need to use some other type, like a BigInteger.
Because you use naive recursion, every call spawns two more calls, so by index 42 there are hundreds of millions of overlapping fibonacci calls being evaluated. Plain recursion isn't suitable here; you can store the already-computed results in an array (memoization) instead. It isn't a problem with Swift itself.
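A minimal sketch of that idea in Swift: keep the recursion from the question, but cache each result so every index is computed only once. The memo dictionary (used here instead of an array, for brevity) is an addition, not part of the question's code:

// Cache of already-computed Fibonacci numbers, keyed by index.
var memo: [Int: Int] = [:]

func fibonacci(_ number: Int) -> Int {
    if number <= 1 { return number }                // same base case as the question
    if let cached = memo[number] { return cached }  // reuse a previously computed value
    let value = fibonacci(number - 1) + fibonacci(number - 2)
    memo[number] = value
    return value
}

// fibonacci(42) now finishes immediately; the naive version makes hundreds of millions of calls.

The iterative answer above avoids even the cache, which is why it is the simplest fix.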
Here is the answer done in two different ways:
func solution2_recursive()
{
    func fibonacci(number: Int) -> (Int)
    {
        if number <= 1
        {
            return number
        }
        else
        {
            return fibonacci(number - 1) + fibonacci(number - 2)
        }
    }
    var sum = 0
    print("calculating...")
    for index in 2..<50
    {
        print(index)
        let f = fibonacci(index)
        if (f < 4000000)
        {
            if (f % 2 == 0)
            {
                sum += f
            }
        }
        else
        {
            print(sum)
            return
        }
    }
}
solution 2
func solution2()
{
    var phiOne : Double = (1.0 + sqrt(5.0)) / 2.0
    var phiTwo : Double = (1.0 - sqrt(5.0)) / 2.0
    func findFibonacciNumber (nthNumber : Double) -> Int64
    {
        let nthNumber : Double = (pow(phiOne, nthNumber) - (pow(phiTwo, nthNumber))) / sqrt(5.0)
        return Int64(nthNumber)
    }
    var sum : Int64 = 0
    print("calculating...")
    for index in 2..<50
    {
        let f = findFibonacciNumber(Double(index))
        if (f < 4000000)
        {
            if (f % 2 == 0)
            {
                sum += f
            }
        }
        else
        {
            print(sum)
            return
        }
    }
}
I'm trying to convert the following C# into F#:
public class Matrix
{
    double[,] matrix;

    public int Cols
    {
        get
        {
            return this.matrix.GetUpperBound(1) + 1;
        }
    }

    public int Rows
    {
        get
        {
            return this.matrix.GetUpperBound(0) + 1;
        }
    }

    public Matrix(double[,] sourceMatrix)
    {
        this.matrix = new double[sourceMatrix.GetUpperBound(0) + 1, sourceMatrix.GetUpperBound(1) + 1];
        for (int r = 0; r < this.Rows; r++)
        {
            for (int c = 0; c < this.Cols; c++)
            {
                this[r, c] = sourceMatrix[r, c];
            }
        }
    }

    public double this[int row, int col]
    {
        get
        {
            return this.matrix[row, col];
        }
        set
        {
            this.matrix[row, col] = value;
        }
    }
}
This is what I have so far:
type Matrix(sourceMatrix:double[,]) =
    let mutable (matrix:double[,]) = Array2D.create (sourceMatrix.GetUpperBound(0) + 1) (sourceMatrix.GetUpperBound(1) + 1) 0.0
    member this.Item
        with get(x, y) = matrix.[(x, y)]
        and set(x, y) value = matrix.[(x, y)] <- value
    do
        for i = 0 to matrix.[i].Length - 1 do
            for j = (i + 1) to matrix.[j].Length - 1 do
                this.[i].[j] = matrix.[i].[j]
My type above seems to have two problems I'm not sure how to resolve. The first is that matrix.[(x, y)] is expected to have type 'a[] but has type double[,]. The second is that type definitions must have let/do bindings preceding member and interface definitions. The problem with that is that I'm trying to populate an indexed property in the do block, which means I have to create it first.
Thanks in advance,
Bob
Regarding your first problem, you want to use matrix.[x,y] instead of matrix.[(x,y)] - your matrix is indexed by two integers, not by a tuple of integers (although these are conceptually similar).
Here's something roughly equivalent to your C#:
type Matrix(sourceMatrix:double[,]) =
    let rows = sourceMatrix.GetUpperBound(0) + 1
    let cols = sourceMatrix.GetUpperBound(1) + 1
    let matrix = Array2D.zeroCreate<double> rows cols
    do
        for i in 0 .. rows - 1 do
            for j in 0 .. cols - 1 do
                matrix.[i,j] <- sourceMatrix.[i,j]
    member this.Rows = rows
    member this.Cols = cols
    member this.Item
        with get(x, y) = matrix.[x, y]
        and set(x, y) value = matrix.[x, y] <- value
This assumes that your matrix can't actually be reassigned (in the C# you've posted, you could have made the matrix field readonly, unless there's additional code you've hidden). The number of rows and columns can therefore be calculated once in the constructor, since the entries of the matrix may change but its size won't.
However, if you want a more literal translation of your code, you can give your newly constructed instance a name (this in this case):
type Matrix(sourceMatrix:double[,]) as this =
    let mutable matrix = Array2D.zeroCreate<double> (sourceMatrix.GetUpperBound(0) + 1) (sourceMatrix.GetUpperBound(1) + 1)
    do
        for i in 0 .. this.Rows - 1 do
            for j in 0 .. this.Cols - 1 do
                this.[i,j] <- sourceMatrix.[i,j]
    member this.Rows = matrix.GetUpperBound(0) + 1
    member this.Cols = matrix.GetUpperBound(1) + 1
    member this.Item
        with get(x, y) = matrix.[x, y]
        and set(x, y) value = matrix.[x, y] <- value
type Matrix(sourceMatrix:double[,]) =
    let matrix = Array2D.copy sourceMatrix
    member this.Item
        with get(x, y) = matrix.[x, y]
        and set(x, y) value = matrix.[x, y] <- value