Horn-Schunck optical flow implementation issue - OpenCV

I am trying to implement the Horn-Schunck optical flow algorithm with NumPy and OpenCV.
I am using the Horn-Schunck method as described on Wikipedia and in the original paper.
But my implementation fails on the following simple example.
Frame1:
[[ 0 0 0 0 0 0 0 0 0 0]
[ 0 255 255 0 0 0 0 0 0 0]
[ 0 255 255 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0]]
Frame2:
[[ 0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 255 255 0 0 0 0 0]
[ 0 0 0 255 255 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0]]
This is just a small white rectangle that moves by 2 pixels in frame 2.
My implementation produces the following flow.
U part of the flow (I apply np.round to every part of the flow; the original values are nearly the same):
[[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
V part of flow:
[[ 0. 1. 0. -1. -0. 0. 0. 0. 0. 0.]
[-0. -0. 0. 0. 0. 0. 0. 0. 0. 0.]
[-0. -1. -0. 1. 0. 0. 0. 0. 0. 0.]
[-0. -0. -0. 0. 0. 0. 0. 0. 0. 0.]
[-0. -0. -0. 0. 0. 0. 0. 0. 0. 0.]]
It looks like this flow is incorrect (because if I move every pixel of frame 2 in the direction of the corresponding flow component, I never get frame 1).
My implementation also fails on real images.
But if I move the rectangle by 1 pixel right (or left, or up, or down), my implementation produces:
U part of flow:
[[1 1 1 .....]
[1 1 1 .....]
......
[1 1 1 .....]]
V part of flow:
[[0 0 0 .....]
[0 0 0 .....]
......
[0 0 0 .....]]
I suppose that this flow is correct, because I can reconstruct frame 1 with the following procedure:
def translateBrute(img, u, v):
    res = np.zeros_like(img)
    u = np.round(u).astype(np.int)
    v = np.round(v).astype(np.int)
    for i in xrange(img.shape[0]):
        for j in xrange(img.shape[1]):
            res[i, j] = takePixel(img, i + v[i, j], j + u[i, j])
    return res
where takePixel is a simple function that returns the pixel intensity if the input coordinates lie inside the image, or the intensity at the image border otherwise.
This is my implementation:
import cv2
import sys
import numpy as np

def takePixel(img, i, j):
    i = i if i >= 0 else 0
    j = j if j >= 0 else 0
    i = i if i < img.shape[0] else img.shape[0] - 1
    j = j if j < img.shape[1] else img.shape[1] - 1
    return img[i, j]

#Numerical derivatives from original paper: http://people.csail.mit.edu/bkph/papers/Optical_Flow_OPT.pdf
def xDer(img1, img2):
    res = np.zeros_like(img1)
    for i in xrange(res.shape[0]):
        for j in xrange(res.shape[1]):
            sm = 0
            sm += takePixel(img1, i,     j + 1) - takePixel(img1, i,     j)
            sm += takePixel(img1, i + 1, j + 1) - takePixel(img1, i + 1, j)
            sm += takePixel(img2, i,     j + 1) - takePixel(img2, i,     j)
            sm += takePixel(img2, i + 1, j + 1) - takePixel(img2, i + 1, j)
            sm /= 4.0
            res[i, j] = sm
    return res

def yDer(img1, img2):
    res = np.zeros_like(img1)
    for i in xrange(res.shape[0]):
        for j in xrange(res.shape[1]):
            sm = 0
            sm += takePixel(img1, i + 1, j    ) - takePixel(img1, i, j    )
            sm += takePixel(img1, i + 1, j + 1) - takePixel(img1, i, j + 1)
            sm += takePixel(img2, i + 1, j    ) - takePixel(img2, i, j    )
            sm += takePixel(img2, i + 1, j + 1) - takePixel(img2, i, j + 1)
            sm /= 4.0
            res[i, j] = sm
    return res

def tDer(img, img2):
    res = np.zeros_like(img)
    for i in xrange(res.shape[0]):
        for j in xrange(res.shape[1]):
            sm = 0
            for ii in xrange(i, i + 2):
                for jj in xrange(j, j + 2):
                    sm += takePixel(img2, ii, jj) - takePixel(img, ii, jj)
            sm /= 4.0
            res[i, j] = sm
    return res

averageKernel = np.array([[ 0.08333333, 0.16666667, 0.08333333],
                          [ 0.16666667, 0.        , 0.16666667],
                          [ 0.08333333, 0.16666667, 0.08333333]], dtype=np.float32)

#average of the flow around point i, j. I use filter2D to improve performance.
def average(img):
    return cv2.filter2D(img.astype(np.float32), -1, averageKernel)

def translateBrute(img, u, v):
    res = np.zeros_like(img)
    u = np.round(u).astype(np.int)
    v = np.round(v).astype(np.int)
    for i in xrange(img.shape[0]):
        for j in xrange(img.shape[1]):
            res[i, j] = takePixel(img, i + v[i, j], j + u[i, j])
    return res

#Core of the algorithm. Iterative scheme from wiki: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method#Mathematical_details
def hornShunckFlow(img1, img2, alpha):
    img1 = img1.astype(np.float32)
    img2 = img2.astype(np.float32)
    Idx = xDer(img1, img2)
    Idy = yDer(img1, img2)
    Idt = tDer(img1, img2)
    u = np.zeros_like(img1)
    v = np.zeros_like(img1)
    #100 iterations are enough for the small example
    for iteration in xrange(100):
        u0 = np.copy(u)
        v0 = np.copy(v)
        uAvg = average(u0)
        vAvg = average(v0)
        # '*', '+', '/' operations in numpy work component-wise
        u = uAvg - 1.0/(alpha**2 + Idx**2 + Idy**2) * Idx * (Idx * uAvg + Idy * vAvg + Idt)
        v = vAvg - 1.0/(alpha**2 + Idx**2 + Idy**2) * Idy * (Idx * uAvg + Idy * vAvg + Idt)
        if iteration % 10 == 0:
            print 'iteration', iteration, np.linalg.norm(u - u0) + np.linalg.norm(v - v0)
    return u, v

if __name__ == '__main__':
    img1c = cv2.imread(sys.argv[1])
    img2c = cv2.imread(sys.argv[2])
    img1g = cv2.cvtColor(img1c, cv2.COLOR_BGR2GRAY)
    img2g = cv2.cvtColor(img2c, cv2.COLOR_BGR2GRAY)
    u, v = hornShunckFlow(img1g, img2g, 0.1)
    imgRes = translateBrute(img2g, u, v)
    cv2.imwrite('res.png', imgRes)
    print img1g
    print translateBrute(img2g, u, v)
The optimization scheme is taken from Wikipedia and the numerical derivatives are taken from the original paper.
Does anyone have an idea why my implementation produces an incorrect flow?
I can provide any additional info if necessary.
PS Sorry for my poor English.
UPD:
I implemented the Horn-Schunck cost function:
def grad(img):
    Idx = cv2.filter2D(img, -1, np.array([
        [-1, -2, -1],
        [ 0,  0,  0],
        [ 1,  2,  1]], dtype=np.float32))
    Idy = cv2.filter2D(img, -1, np.array([
        [-1, 0, 1],
        [-2, 0, 2],
        [-1, 0, 1]], dtype=np.float32))
    return Idx, Idy

def hornShunckCost(Idx, Idy, Idt, u, v, alpha):
    udx, udy = grad(u)
    vdx, vdy = grad(v)
    return (sum(sum((Idx*u + Idy*v + Idt)**2)) +
            (alpha**2)*(sum(sum(udx**2)) +
                        sum(sum(udy**2)) +
                        sum(sum(vdx**2)) +
                        sum(sum(vdy**2))))
and check the value of this function inside the iterations:
if iteration % 10 == 0:
    print 'iter', iteration, np.linalg.norm(u - u0) + np.linalg.norm(v - v0)
    print hornShunckCost(Idx, Idy, Idt, u, v, alpha)
If I use the simple example with the rectangle that has been moved by one pixel, everything is OK: the value of the cost function decreases at every step.
But on the example with the rectangle that has been moved by two pixels, the value of the cost function increases at every step.
This behaviour of the algorithm is really strange.
Maybe I chose an incorrect way to calculate the cost function.

I overlooked the fact that the classic Horn-Schunck scheme uses a linearized data term (I1(x, y) - I2(x + u(x, y), y + v(x, y))). This linearization makes the optimization easy but disallows large displacements.
To handle big displacements the usual approach is pyramidal (coarse-to-fine) Horn-Schunck.
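For illustration, here is a minimal coarse-to-fine sketch (my own addition, not part of the original post). It reuses hornShunckFlow from above and warps frame 2 by the current flow estimate at each pyramid level, so the residual displacement stays small enough for the linearized data term; the warping convention matches translateBrute:
import cv2
import numpy as np

def pyramidalHornSchunckFlow(img1, img2, alpha, levels=3):
    # Build Gaussian pyramids; level 0 is full resolution.
    pyr1 = [img1.astype(np.float32)]
    pyr2 = [img2.astype(np.float32)]
    for _ in range(levels - 1):
        pyr1.append(cv2.pyrDown(pyr1[-1]))
        pyr2.append(cv2.pyrDown(pyr2[-1]))
    u = v = None
    for lvl in range(levels - 1, -1, -1):  # coarsest level first
        f1, f2 = pyr1[lvl], pyr2[lvl]
        h, w = f1.shape
        if u is None:
            u = np.zeros((h, w), np.float32)
            v = np.zeros((h, w), np.float32)
        else:
            # Upsample the coarser flow and double it (pixel distances double).
            u = cv2.resize(u, (w, h)) * 2.0
            v = cv2.resize(v, (w, h)) * 2.0
        # Warp frame 2 towards frame 1 with the current estimate, so only a
        # small residual flow remains to be estimated at this level.
        gy, gx = np.mgrid[0:h, 0:w].astype(np.float32)
        warped = cv2.remap(f2, gx + u, gy + v, cv2.INTER_LINEAR,
                           borderMode=cv2.BORDER_REPLICATE)
        du, dv = hornShunckFlow(f1, warped, alpha)
        u += du
        v += dv
    return u, v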


pytorch LSTM to map series of feature vectors to their labels

Currently I have input X with shape (50, 25): there are 50 feature vectors and each vector has 25 dimensions. The data of X is, for example, like the following:
X = [[0. 0. 0. ... 1. 1. 1.]
[0. 0. 0. ... 1. 1. 1.]
[0. 0. 0. ... 1. 1. 1.]
...
[0. 0. 0. ... 1. 1. 1.]
[0. 0. 0. ... 1. 1. 1.]
[0. 0. 0. ... 1. 1. 1.]]
And the output label y is [0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0], of length 50. I.e. each feature vector has a label which corresponds to an element in y.
How can I construct a PyTorch LSTM, reshape the input to 3 dimensions, and properly interpret the output? Thanks so much for the help in advance.
Currently I have a template for the LSTM like the one below; since my input is already numerical, I was thinking of getting rid of the encoder / decoder part. Is that correct?
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""
    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0, tie_weights=False):
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.ntoken = ntoken
        self.decoder = nn.Linear(nhid, self.ntoken)
        if rnn_type in ['LSTM', 'GRU']:
            self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        else:
            try:
                nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            except KeyError:
                raise ValueError("""An invalid option for `--model` was supplied,
                                 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
            self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
        self.init_weights()
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        initrange = 0.1
        nn.init.zeros_(self.decoder.weight)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden):
        emb = self.drop(input)
        emb = emb.transpose(1, 0)
        output, hidden = self.rnn(emb, hidden)  # output of shape (length, batchsize, nhid)
        output = self.drop(output)
        output = output[-1, :, :]  # shape (batchsize, nhid)
        decoded = self.decoder(output)  # shape (batchsize, ntoken)
        return F.log_softmax(decoded, dim=1), hidden

    def init_hidden(self, bsz):
        weight = next(self.parameters())
        if self.rnn_type == 'LSTM':
            return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                    weight.new_zeros(self.nlayers, bsz, self.nhid))
        else:
            return weight.new_zeros(self.nlayers, bsz, self.nhid)
Currently the training loop I wrote is:
X = X.reshape((1, 50, 25))
hidden = self.model.init_hidden(1)
for iter in range(0, self.epochs):
    data = torch.from_numpy(X)
    target = torch.LongTensor(y.reshape((1, torch.LongTensor(y).size(0))))
    self.model.zero_grad()
    self.optimizer.zero_grad()
    hidden = self.repackage_hidden(hidden)
    output, hidden = self.model(data.float(), hidden)
    loss = self.criterion(output, target)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.25)
    self.optimizer.step()
    self.model.train()
But I got the error: RuntimeError: multi-target not supported at /tmp/pip-req-build-4baxydiv/aten/src/THNN/generic/ClassNLLCriterion.c:22
The output of the RNN has shape (length, batchsize, nhid). Based on your label (1 number per sample) I assume you're doing classification, so usually we give the classifier (self.decoder) the output features of the last timestep. Here I changed your forward method as below and got an output of shape (batchsize, ntoken), which fits the shape of your label.
def forward(self, input, hidden):
    emb = self.drop(self.encoder(input))
    emb = emb.transpose(1, 0)  # (batchsize, length, ninp) => (length, batchsize, ninp)
    output, hidden = self.rnn(emb, hidden)  # output of shape (length, batchsize, nhid)
    output = self.drop(output)
    output = output[-1, :, :]  # shape (batchsize, nhid)
    decoded = self.decoder(output)  # shape (batchsize, ntoken)
    return F.log_softmax(decoded, dim=1), hidden
About getting rid of self.encoder: it is an embedding layer which takes an array of indices and replaces each of them with a vector. If your input consists of indices (int/long) of something, you may use it; otherwise (if it is not an index but some number as a float, like a temperature, ...), you should get rid of it (because it would be wrong). Sorry if my English is confusing.
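If instead you want one label per timestep (the y in the question has one label per feature vector), a minimal sketch would keep all timesteps and flatten batch and time before the loss. This is my own illustration; the hidden size and the number of classes are assumptions:
import torch
import torch.nn as nn

# Assumed sizes: 1 sequence, 50 timesteps, 25 features, 4 label classes.
X = torch.randn(1, 50, 25)                # (batch, length, features)
y = torch.zeros(1, 50, dtype=torch.long)  # one label per timestep

lstm = nn.LSTM(input_size=25, hidden_size=64, batch_first=True)
decoder = nn.Linear(64, 4)

out, _ = lstm(X)            # (batch, length, hidden)
logits = decoder(out)       # (batch, length, classes)

# CrossEntropyLoss expects (N, C) vs (N,), so flatten batch and time:
loss = nn.CrossEntropyLoss()(logits.reshape(-1, 4), y.reshape(-1))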

Trivial change but different results (ForAll and Exists)

I have a set of inequalities for which I want to find a (trivial) solution.
When I use the Exists operator, everything works great, as you can see in this Z3 script and in its Z3Py version.
#!/bin/python
from z3 import *
# we have that
s = Solver()
## mu0_px is the initial marking for place px;
mu_p1, mu_p2, mu_p3 = 0, 0, 1
## pi_tj is the pre-condition from place pi to transition tj
p1_t1, p1_t2, p1_t3 = 1, 0, 0
p2_t1, p2_t2, p2_t3 = 0, 1, 0
p3_t1, p3_t2, p3_t3 = 0, 0, 1
## tj_pi is the post-condition from transition tj to place pi
t1_p1, t2_p1, t3_p1 = 0, 1, 0
t1_p2, t2_p2, t3_p2 = 1, 0, 0
t1_p3, t2_p3, t3_p3 = 0, 0, 0
## find the values for the faulty transitions
f_p1, p1_f = Ints('f_p1 p1_f')
f_p2, p2_f = Ints('f_p2 p2_f')
f_p3, p3_f = Ints('f_p3 p3_f')
# where they should be
s.add( f_p1 == 1, f_p2 == 0, f_p3 == 0 )
s.add( p1_f == 0, p2_f == 0, p3_f == 1 )
## l \in Naturals ;
l11 = Int('l11')
# Sequence 11: t1,t2,t3
s11_t1, s11_t2, s11_t3 = 1, 1, 0
# It does work! :o
s.add( l11 == 1 )
s.add(
    Exists([l11],
        Or(
            mu_p1 + (t1_p1-p1_t1)*s11_t1 + (t2_p1-p1_t2)*s11_t2 + (t3_p1-p1_t3)*s11_t3 + l11 * (f_p1 - p1_f) < p1_t3,
            mu_p2 + (t1_p2-p2_t1)*s11_t1 + (t2_p2-p2_t2)*s11_t2 + (t3_p2-p2_t3)*s11_t3 + l11 * (f_p2 - p2_f) < p2_t3,
            mu_p3 + (t1_p3-p3_t1)*s11_t1 + (t2_p3-p3_t2)*s11_t2 + (t3_p3-p3_t3)*s11_t3 + l11 * (f_p3 - p3_f) < p3_t3,
        )
    )
)
print(s)
print(s.check())
print(s.model())
However, when I replace the existential quantifier with ForAll, as in this link and in the Python code below, there is no solution, although I believe it should still be sat.
#!/bin/python
from z3 import *
# we have that
s = Solver()
## mu0_px is the initial marking for place px;
mu_p1, mu_p2, mu_p3 = 0, 0, 1
## pi_tj is the pre-condition from place pi to transition tj
p1_t1, p1_t2, p1_t3 = 1, 0, 0
p2_t1, p2_t2, p2_t3 = 0, 1, 0
p3_t1, p3_t2, p3_t3 = 0, 0, 1
## tj_pi is the post-condition from transition tj to place pi
t1_p1, t2_p1, t3_p1 = 0, 1, 0
t1_p2, t2_p2, t3_p2 = 1, 0, 0
t1_p3, t2_p3, t3_p3 = 0, 0, 0
## find the values for the faulty transitions
f_p1, p1_f = Ints('f_p1 p1_f')
f_p2, p2_f = Ints('f_p2 p2_f')
f_p3, p3_f = Ints('f_p3 p3_f')
# where they should be
s.add( f_p1 == 1, f_p2 == 0, f_p3 == 0 )
s.add( p1_f == 0, p2_f == 0, p3_f == 1 )
## l \in Naturals ;
l11 = Int('l11')
# Sequence 11: t1,t2,t3
s11_t1, s11_t2, s11_t3 = 1, 1, 0
# It does not work! :(
s.add( l11 == 1 )
s.add(
    ForAll([l11],
        Or(
            mu_p1 + (t1_p1-p1_t1)*s11_t1 + (t2_p1-p1_t2)*s11_t2 + (t3_p1-p1_t3)*s11_t3 + l11 * (f_p1 - p1_f) < p1_t3,
            mu_p2 + (t1_p2-p2_t1)*s11_t1 + (t2_p2-p2_t2)*s11_t2 + (t3_p2-p2_t3)*s11_t3 + l11 * (f_p2 - p2_f) < p2_t3,
            mu_p3 + (t1_p3-p3_t1)*s11_t1 + (t2_p3-p3_t2)*s11_t2 + (t3_p3-p3_t3)*s11_t3 + l11 * (f_p3 - p3_f) < p3_t3,
        )
    )
)
print(s)
print(s.check())
print(s.model())
Has anyone ever had a problem like this before?
The l11 variable you declared and the one that gets used in the quantification are totally different: in particular, your stating that it equals 1 has no bearing on the quantified formula. So you get sat with the existential but unsat with the universal, since the formula is clearly not true for all values of l11.
This might be confusing, but it is the intended behaviour. To see the effect, simply print the SMT-LIB equivalent and you'll see how the variables are assigned.
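A minimal sketch of this shadowing (my own illustration, not from the original answer):
from z3 import *

s = Solver()
l11 = Int('l11')
s.add(l11 == 1)                  # constrains the free constant l11
s.add(ForAll([l11], l11 >= 0))   # this l11 is a bound variable, unrelated to the one above
print(s.sexpr())                 # the SMT-LIB dump shows the two distinct variables
print(s.check())                 # unsat: l11 >= 0 does not hold for all integers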

Keras ImageDataGenerator how to see parameters by which image was modified

I understand how and why to use an ImageDataGenerator, but I am interested in casting an eyeball on how the ImageDataGenerator affects my images so I can decide whether I have chosen a good amount of latitude in augmenting my data. I see that I can iterate over the images coming from the generator. I am looking for a way to see whether it's an original image or a modified image, and if the latter what parameters were modified in that particular instance I'm looking at. How/can I see this?
Most of the transformations (except flipping) will always modify the input image. For example, if you've specified rotation_range, from the source code:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
it's unlikely that the random number will be exactly 0.
There's no convenient way to print out the transformations applied to each image. You would have to modify the source code and add some printing inside ImageDataGenerator.random_transform().
If you don't want to touch the source code (for example, on a shared machine), you can extend ImageDataGenerator and override random_transform().
import numpy as np
from keras.preprocessing.image import *

class MyImageDataGenerator(ImageDataGenerator):
    def random_transform(self, x, seed=None):
        # these lines are just copied-and-pasted from the original random_transform()
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_channel_axis = self.channel_axis - 1
        if seed is not None:
            np.random.seed(seed)
        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        if self.height_shift_range:
            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix
        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
        if transform_matrix is not None:
            h, w = x.shape[img_row_axis], x.shape[img_col_axis]
            transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
            x = apply_transform(x, transform_matrix, img_channel_axis,
                                fill_mode=self.fill_mode, cval=self.cval)
        if self.channel_shift_range != 0:
            x = random_channel_shift(x,
                                     self.channel_shift_range,
                                     img_channel_axis)
        if self.horizontal_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_col_axis)
        if self.vertical_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_row_axis)
        # print out the transformations applied to the image
        print('Rotation:', theta / np.pi * 180)
        print('Height shift:', tx / x.shape[img_row_axis])
        print('Width shift:', ty / x.shape[img_col_axis])
        print('Shear:', shear)
        print('Zooming:', zx, zy)
        return x
I just added 5 prints at the end of the function. The other lines are copied and pasted from the original source code.
Now you can use it with, e.g.,
gen = MyImageDataGenerator(rotation_range=15,
                           width_shift_range=0.1,
                           height_shift_range=0.1,
                           zoom_range=0.5)
flow = gen.flow_from_directory('data', batch_size=1)
img = next(flow)
and see information like this printed on your terminal:
Rotation: -9.185074669096467
Height shift: 0.03791625365979884
Width shift: -0.08398553078553198
Shear: 0
Zooming: 1.40950509832 1.12895574928

Projective Transform - matlab code

I can't use any toolbox functions; I need to build this from scratch.
% load images
img1 = readImage('roadSign.tif');
img2 = readImage('lena.tif');
% call the main function
mapIntoImage(img1,img2)

function [newImage] = mapIntoImage(imageA,imageB)
% Input:  imageA, imageB - a grayscale image in the range [0..255].
%
% Output: newImage - imageA into which image B has been mapped.
%
showImage(imageA)
hold on
% Initially, the list of points is empty.
xy = [];
% Loop, picking up the points.
disp('Please enter corners of place to insert image in clockwise order.')
for j = 1:4
    [xi,yi] = ginput(1);
    % draw a yellow dot
    plot(xi,yi,'yo')
    xy(:,j) = [xi;yi];
end
% get x1 y1 coordinates - xy(:, 1)
imgRow = size(imageB,1);
imgCol = size(imageB,2);
[X,Y] = meshgrid(1:imgCol,1:imgRow);
imgBcords = [0 size(imageB, 1) size(imageB,1) 0 ;
             0 0 size(imageB,2) size(imageB,2)];
coefs = findCoefficients(xy, imgBcords);
A = [coefs(1) coefs(2) coefs(5); coefs(3) coefs(4) coefs(6); coefs(7) coefs(8) 1];
temp = zeros(size(X,1), size(X,2), 3);
new = ones(256);
for i = 1:size(X,1)
    for j = 1:size(X,2)
        temp(i,j,:) = A*[X(i,j); Y(i,j); new(i,j)];
    end
end
end
function [ result ] = findCoefficients( imageA, imageB )
% finds coefficients for the inverse mapping algorithm
% takes 2 2d vectors, each consisting of 4 points x,y,
% and returns the coefs according to the reverse mapping function
%
% x y 0 0 1 0 -xx' -yx'
% 0 0 x y 0 1 -xy' -yy'
% y' and x' are in the destination picture
A = [imageB(1,1) imageB(2,1) 0 0 1 0 -imageB(1,1)*imageA(1,1) -imageB(2,1)*imageA(1,1);
     0 0 imageB(1,1) imageB(2,1) 0 1 -imageB(1,1)*imageA(2,1) -imageB(2,1)*imageA(2,1);
     imageB(1,2) imageB(2,2) 0 0 1 0 -imageB(1,2)*imageA(1,2) -imageB(2,2)*imageA(1,2);
     0 0 imageB(1,2) imageB(2,2) 0 1 -imageB(1,2)*imageA(2,2) -imageB(2,2)*imageA(2,2);
     imageB(1,3) imageB(2,3) 0 0 1 0 -imageB(1,3)*imageA(1,3) -imageB(2,3)*imageA(1,3);
     0 0 imageB(1,3) imageB(2,3) 0 1 -imageB(1,3)*imageA(2,3) -imageB(2,3)*imageA(2,3);
     imageB(1,4) imageB(2,4) 0 0 1 0 -imageB(1,4)*imageA(1,4) -imageB(2,4)*imageA(1,4);
     0 0 imageB(1,4) imageB(2,4) 0 1 -imageB(1,4)*imageA(2,4) -imageB(2,4)*imageA(2,4)];
B = [imageB(1,1); imageB(2,1); imageB(1,2); imageB(2,2); imageB(1,3); imageB(2,3); imageB(1,4); imageB(2,4)];
result = pinv(A)*B;
end
Now I want to build the transform
[x' y' 1] = A*[X Y 1];
I have figured out that I would need to use repmat, but I can't seem to get the right syntax without loops.
What's the most efficient way to do it?
A projective transform has the form
$ x' = \frac{a_{11}x + a_{12}y + a_{13}}{a_{31}x + a_{32}y + a_{33}} \\
y' = \frac{a_{21}x + a_{22}y + a_{23}}{a_{31}x + a_{32}y + a_{33}} $
where the coefficients are defined up to some scale factor. One of the ways to ensure a constant scale factor is to set $a_{33}=1$. One easy way to think about it is to use homogeneous coordinates:
$ \left( \begin{array}{c} x'\\y'\\S \end{array} \right) =
\left( \begin{array}{ccc} a_{11} & a_{12} & a_{13}\\ a_{21} & a_{22} & a_{23}\\ a_{31} & a_{32} & a_{33} \end{array} \right)
\left( \begin{array}{c} x\\y\\1 \end{array} \right) $
These coordinates are defined up to scale. That is,
$ \left( \begin{array}{c} x'/S\\y'/S\\1 \end{array} \right) \equiv
\left( \begin{array}{c} x'\\y'\\S \end{array} \right) $
Thus, in your case you should do the following (assuming that x and y are column vectors, and that A is the transpose of the matrix described above):
XY = [x y ones(size(x))] * A;
XY(:,1) = XY(:,1)./XY(:,3);
XY(:,2) = XY(:,2)./XY(:,3);

Sobel filter kernel of large size

I am using a Sobel filter of size 3x3 to calculate the image derivative. Looking at some articles on the internet, it seems that kernels for the Sobel filter of sizes 5x5 and 7x7 are also common, but I am not able to find their kernel values.
Could someone please let me know the kernel values for the Sobel filter of size 5x5 and 7x7? Also, if someone could share a method to generate the kernel values, that would be very useful.
Thanks in advance.
Complete solution for arbitrary Sobel kernel sizes and angles
tl;dr: skip down to section 'Examples'
To add another solution, expanding on this document (it's not particularly high quality, but it shows some usable graphics and matrices starting at the bottom of page 2).
Goal
What we're trying to do is estimate the local gradient of the image at position (x,y). The gradient is a vector made up of the components in x and y direction, gx and gy.
Now, imagine we want to approximate the gradient based on our pixel (x,y) and its neighbours as a kernel operation (3x3, 5x5, or whatever size).
Solution idea
We can approximate the gradient by summing over the projections of all neighbor-center pairs onto the gradient direction. (Sobel's kernel is just a particular method of weighting the different contributions, and so is Prewitt, basically).
Explicit intermediate steps for 3x3
This is the local image, central pixel (x,y) marked as 'o' (center)
a b c
d o f
g h i
Let's say we want the gradient in the positive x direction. The unit vector in the positive x-direction is (1,0). [I'll later use the convention that the positive y direction is DOWN, i.e. (0,1), and that (0,0) is the top left of the image.]
The vector from o to f ('of' for short) is (1,0). The gradient in direction 'of' is (f - o) / 1 (value of image at pixel here denoted f minus value at center o, divided by distance between those pixels). If we project the unit vector of that particular neighbor gradient onto our desired gradient direction (1,0) via a dot product we get 1. Here is a little table with the contributions of all neighbors, starting with the easier cases. Note that for diagonals, their distance is sqrt2, and the unit vectors in the diagonal directions are 1/sqrt2 * (+/-1, +/-1)
f: (f-o)/1 * 1
d: (d-o)/1 * -1 because (-1, 0) dot (1, 0) = -1
b: (b-o)/1 * 0 because (0, -1) dot (1, 0) = 0
h: (h-o)/1 * 0 (as per b)
a: (a-o)/sqrt2 * -1/sqrt2 distance is sqrt2, and 1/sqrt2*(-1,-1) dot (1,0) = -1/sqrt2
c: (c-o)/sqrt2 * +1/sqrt2 ...
g: (g-o)/sqrt2 * -1/sqrt2 ...
i: (i-o)/sqrt2 * +1/sqrt2 ...
edit for clarification:
There are two factors of 1/sqrt(2) for the following reason:
We are interested in the contribution to the gradient in a specific direction (here x), so we need to project the directional gradient from the center pixel to the neighbor pixel onto the direction we are interested in. This is accomplished by taking the scalar product of the unit vectors in the respective directions, which introduces the first factor 1/L (here 1/sqrt(2) for the diagonals).
The gradient measures the infinitesimal change at a point, which we approximate by finite differences. In terms of a linear equation, m = (y2-y1)/(x2-x1). For this reason, the value difference from the center pixel to the neighbor pixel (y2-y1) has to be distributed over their distance (corresponds to x2-x1) in order to get the ascent units per distance unit. This yields a second factor of 1/L (here 1/sqrt(2) for the diagonals)
Ok, now we know the contributions. Let's simplify this expression by combining opposing pairs of pixel contributions. I'll start with d and f:
{(f-o)/1 * 1} + {(d-o)/1 * -1}
= f - o - (d - o)
= f - d
Now the first diagonal:
{(c-o)/sqrt2 * 1/sqrt2} + {(g-o)/sqrt2 * -1/sqrt2}
= (c - o)/2 - (g - o)/2
= (c - g)/2
The second diagonal contributes (i - a)/2. The perpendicular direction contributes zero. Note that all contributions from the central pixel 'o' vanish.
We have now calculated the contributions of all closest neighbours to the gradient in positive x-direction at pixel (x,y), so our total approximation of the gradient in x-direction is simply their sum:
gx(x,y) = f - d + (c - g)/2 + (i - a)/2
We can obtain the same result by using a convolution kernel where the coefficients are written in the place of the corresponding neighbor pixel:
-1/2 0 1/2
-1 0 1
-1/2 0 1/2
If you don't want to deal with fractions, you multiply this by 2 and get the well-known Sobel 3x3 kernel.
-1 0 1
G_x = -2 0 2
-1 0 1
The multiplication by two only serves to get convenient integers. The scaling of your output image is basically arbitrary, most of the time you normalize it to your image range, anyway (to get clearly visible results).
By the same reasoning as above, you get the kernel for the vertical gradient gy by projecting the neighbor contributions onto the unit vector in positive y direction (0,1)
-1 -2 -1
G_y = 0 0 0
1 2 1
Formula for kernels of arbitrary size
If you want 5x5 or larger kernels, you only need to pay attention to the distances, e.g.
A B 2 B A
B C 1 C B
2 1 - 1 2
B C 1 C B
A B 2 B A
where
A = 2 * sqrt2
B = sqrt5
C = sqrt2.
If the length of the vector connecting any two pixels is L, the unit vector in that direction has a prefactor of 1/L. For this reason, the contributions of any pixel 'k' to (say) the x-gradient (1,0) can be simplified to "(value difference over squared distance) times (DotProduct of unnormalized direction vector 'ok' with gradient vector, e.g. (1,0) )"
gx_k = (k - o)/(pixel distance^2) ['ok' dot (1,0)].
Because the dot product of the connecting vector with the x unit vector selects the corresponding vector entry, the corresponding G_x kernel entry at position k is just
i / (i*i + j*j)
where i and j are the number of steps from the center pixel to the pixel k in x and y direction. In the above 3x3 calculation, the pixel 'a' would have i = -1 (1 to the left), j = -1 (1 to the top) and hence the 'a' kernel entry is -1 / (1 + 1) = -1/2.
The entries for the G_y kernel are
j/(i*i + j*j).
If I want integer values for my kernel, I follow these steps:
check the available range of the output image
compute highest possible result from applying floating point kernel (i.e. assume max input value under all positive kernel entries, so output value is (sum over all positive kernel values) * (max possible input image value). If you have signed input, you need to consider the negative values as well. Worst case is then the sum of all positive values + sum of all abs values of negative entries (if max input under positives, -max input under negatives). edit: the sum of all abs values has also been aptly called the weight of the kernel
calculate maximum allowed up-scaling for kernel (without overflowing range of output image)
for all integer multiples (from 2 up to the maximum above) of the floating point kernel: check which has the lowest sum of absolute round-off errors and use this kernel (a sketch of this search follows below)
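A small Python sketch of that search (my own illustration, not from the original answer):
import numpy as np

def best_integer_kernel(k, max_scale):
    # Try every integer multiple of the float kernel up to max_scale and
    # keep the rounding with the smallest total round-off error.
    best, best_err = None, float('inf')
    for m in range(2, max_scale + 1):
        scaled = k * m
        err = np.abs(scaled - np.round(scaled)).sum()
        if err < best_err:
            best, best_err = np.round(scaled).astype(int), err
    return best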
So in summary:
Gx_ij = i / (i*i + j*j)
Gy_ij = j / (i*i + j*j)
where i,j is position in the kernel counted from the center. Scale kernel entries as needed to obtain integer numbers (or at least close approximations).
These formulae hold for all kernel sizes.
Examples
           -2/8  -1/5  0  1/5  2/8             -5  -4   0   4   5
           -2/5  -1/2  0  1/2  2/5             -8 -10   0  10   8
G_x (5x5)  -2/4  -1/1  0  1/1  2/4   (*20) =  -10 -20   0  20  10
           -2/5  -1/2  0  1/2  2/5             -8 -10   0  10   8
           -2/8  -1/5  0  1/5  2/8             -5  -4   0   4   5
Note that the central 3x3 pixels of the 5x5 kernel in float notation are just the 3x3 kernel, i.e. larger kernels represent a continued approximation with additional but lower-weighted data. This continues on to larger kernel sizes:
           -3/18  -2/13  -1/10  0  1/10  2/13  3/18
           -3/13  -2/8   -1/5   0  1/5   2/8   3/13
           -3/10  -2/5   -1/2   0  1/2   2/5   3/10
G_x (7x7)  -3/9   -2/4   -1/1   0  1/1   2/4   3/9
           -3/10  -2/5   -1/2   0  1/2   2/5   3/10
           -3/13  -2/8   -1/5   0  1/5   2/8   3/13
           -3/18  -2/13  -1/10  0  1/10  2/13  3/18
Exact integer representations become impractical at this point.
As far as I can tell (don't have access to the original paper), the "Sobel" part to this is properly weighting the contributions. The Prewitt solution can be obtained by leaving out the distance weighting and just entering i and j in the kernel as appropriate.
Bonus: Sobel Kernels for arbitrary directions
So we can approximate the x and y components of the image gradient (which is actually a vector, as stated in the very beginning). The gradient in any arbitrary direction alpha (measured mathematically positive, in this case clockwise since positive y is downward) can be obtained by projecting the gradient vector onto the alpha-gradient unit vector.
The alpha-unit vector is (cos alpha, sin alpha). For alpha = 0° you can obtain the result for gx, for alpha = 90° you get gy.
g_alpha = (alpha-unit vector) dot (gx, gy)
= (cos a, sin a) dot (gx, gy)
= cos a * gx + sin a * gy
If you bother to write down gx and gy as sums of neighbor contributions, you realize that you can group the resulting long expression by terms that apply to the same neighbor pixel, and then rewrite this as a single convolution kernel with entries
G_alpha_ij = (i * cos a + j * sin a)/(i*i + j*j)
If you want the closest integer approximation, follow the steps outlined above.
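To make this concrete, here is a small NumPy sketch of the formula above (my own addition; angles follow the convention used here, with positive y pointing down):
import numpy as np

def sobel_alpha_kernel(size, alpha_deg):
    # G_alpha_ij = (i*cos a + j*sin a) / (i*i + j*j), with a zero center entry.
    half = size // 2
    a = np.deg2rad(alpha_deg)
    j, i = np.mgrid[-half:half + 1, -half:half + 1]  # j: rows (y), i: columns (x)
    denom = (i * i + j * j).astype(float)
    denom[half, half] = 1.0   # avoid division by zero at the center
    k = (i * np.cos(a) + j * np.sin(a)) / denom
    k[half, half] = 0.0
    return k

# alpha_deg = 0 reproduces G_x, alpha_deg = 90 reproduces G_y.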
Other sources seem to give different definitions of the larger kernels. The Intel IPP library, for example, gives the 5x5 kernel as
1 2 0 -2 -1
4 8 0 -8 -4
6 12 0 -12 -6
4 8 0 -8 -4
1 2 0 -2 -1
Intuitively, this makes more sense to me because you're paying more attention to the elements closer to the centre. It also has a natural definition in terms of the 3x3 kernel which is easy to extend to generate larger kernels. That said, in my brief search I've found 3 different definitions of the 5x5 kernel - so I suspect that (as Paul says) the larger kernels are ad hoc, and so this is by no means the definitive answer.
The 3x3 kernel is the outer product of a smoothing kernel and a gradient kernel, in Matlab this is something like
sob3x3 = [ 1 2 1 ]' * [1 0 -1]
the larger kernels can be defined by convolving the 3x3 kernel with another smoothing kernel
sob5x5 = conv2( [ 1 2 1 ]' * [1 2 1], sob3x3 )
you can repeat the process to get progressively larger kernels
sob7x7 = conv2( [ 1 2 1 ]' * [1 2 1], sob5x5 )
sob9x9 = conv2( [ 1 2 1 ]' * [1 2 1], sob7x7 )
...
there are a lot of other ways of writing it, but I think this explains exactly what is happening best. Basically, you start off with a smoothing kernel in one direction and a finite differences estimate of the derivative in the other and then just apply smoothing until you get the kernel size you want.
Because it's just a series of convolutions, all the nice properties hold, (commutativity, associativity and so forth) which might be useful for your implementation. For example, you can trivially separate the 5x5 kernel into its smoothing and derivative components:
sob5x5 = conv([1 2 1],[1 2 1])' * conv([1 2 1],[-1 0 1])
Note that in order to be a "proper" derivative estimator, the 3x3 Sobel should be scaled by a factor of 1/8:
sob3x3 = 1/8 * [ 1 2 1 ]' * [1 0 -1]
and each larger kernel needs to be scaled by an additional factor of 1/16 (because the smoothing kernels are not normalised):
sob5x5 = 1/16 * conv2( [ 1 2 1 ]' * [1 2 1], sob3x3 )
sob7x7 = 1/16 * conv2( [ 1 2 1 ]' * [1 2 1], sob5x5 )
...
UPDATE 23-Apr-2018: it seems that the kernels defined in the link below are not true Sobel kernels (for 5x5 and above) - they may do a reasonable job of edge detection, but they should not be called Sobel kernels. See Daniel’s answer for a more accurate and comprehensive summary. (I will leave this answer here as (a) it is linked to from various places and (b) accepted answers can not easily be deleted.)
Google seems to turn up plenty of results, e.g.
http://rsbweb.nih.gov/nih-image/download/user-macros/slowsobel.macro suggests the following kernels for 3x3, 5x5, 7x7 and 9x9:
3x3:
1 0 -1
2 0 -2
1 0 -1
5x5:
2 1 0 -1 -2
3 2 0 -2 -3
4 3 0 -3 -4
3 2 0 -2 -3
2 1 0 -1 -2
7x7:
3 2 1 0 -1 -2 -3
4 3 2 0 -2 -3 -4
5 4 3 0 -3 -4 -5
6 5 4 0 -4 -5 -6
5 4 3 0 -3 -4 -5
4 3 2 0 -2 -3 -4
3 2 1 0 -1 -2 -3
9x9:
4 3 2 1 0 -1 -2 -3 -4
5 4 3 2 0 -2 -3 -4 -5
6 5 4 3 0 -3 -4 -5 -6
7 6 5 4 0 -4 -5 -6 -7
8 7 6 5 0 -5 -6 -7 -8
7 6 5 4 0 -4 -5 -6 -7
6 5 4 3 0 -3 -4 -5 -6
5 4 3 2 0 -2 -3 -4 -5
4 3 2 1 0 -1 -2 -3 -4
Here is a simple solution made with Python 3 using NumPy and the @Daniel answer.
def custom_sobel(shape, axis):
    """
    shape must be odd: eg. (5,5)
    axis is the direction, with 0 to positive x and 1 to positive y
    """
    k = np.zeros(shape)
    p = [(j, i) for j in range(shape[0])
         for i in range(shape[1])
         if not (i == (shape[1] - 1)/2. and j == (shape[0] - 1)/2.)]
    for j, i in p:
        j_ = int(j - (shape[0] - 1)/2.)
        i_ = int(i - (shape[1] - 1)/2.)
        k[j, i] = (i_ if axis == 0 else j_)/float(i_*i_ + j_*j_)
    return k
It returns a (5,5) kernel like this:
Sobel x:
[[-0.25 -0.2 0. 0.2 0.25]
[-0.4 -0.5 0. 0.5 0.4 ]
[-0.5 -1. 0. 1. 0.5 ]
[-0.4 -0.5 0. 0.5 0.4 ]
[-0.25 -0.2 0. 0.2 0.25]]
Sobel y:
[[-0.25 -0.4 -0.5 -0.4 -0.25]
[-0.2 -0.5 -1. -0.5 -0.2 ]
[ 0. 0. 0. 0. 0. ]
[ 0.2 0.5 1. 0.5 0.2 ]
[ 0.25 0.4 0.5 0.4 0.25]]
If anyone knows a better way to do that in Python, please let me know. I'm still a newbie ;)
Sobel gradient filter generator
(This answer refers to the analysis given by @Daniel, above.)
Gx[i,j] = i / (i*i + j*j)
Gy[i,j] = j / (i*i + j*j)
This is an important result, and a better explanation than can be found in the original paper. It should be written up in Wikipedia, or somewhere, because it also seems superior to any other discussion of the issue available on the internet.
However, it is not actually true that integer-valued representations are impractical for filters of size greater than 5*5, as claimed. Using 64-bit integers, Sobel filter sizes up to 15*15 can be exactly expressed.
Here are the first four; the result should be divided by the "weight", so that the gradient of an image region such as the following is normalized to a value of 1.
1 2 3 4 5
1 2 3 4 5
1 2 3 4 5
1 2 3 4 5
1 2 3 4 5
Gx(3) :
-1/2 0/1 1/2 -1 0 1
-1/1 0 1/1 * 2 = -2 0 2
-1/2 0/1 1/2 -1 0 1
weight = 4 weight = 8
Gx(5) :
-2/8 -1/5 0/4 1/5 2/8 -5 -4 0 4 5
-2/5 -1/2 0/1 1/2 2/5 -8 -10 0 10 8
-2/4 -1/1 0 1/1 2/4 * 20 = -10 -20 0 20 10
-2/5 -1/2 0/1 1/2 2/5 -8 -10 0 10 8
-2/8 -1/5 0/4 1/5 2/8 -5 -4 0 4 5
weight = 12 weight = 240
Gx(7) :
-3/18 -2/13 -1/10 0/9 1/10 2/13 3/18 -130 -120 -78 0 78 120 130
-3/13 -2/8 -1/5 0/4 1/5 2/8 3/13 -180 -195 -156 0 156 195 180
-3/10 -2/5 -1/2 0/1 1/2 2/5 3/10 -234 -312 -390 0 390 312 234
-3/9 -2/4 -1/1 0 1/1 2/4 3/9 * 780 = -260 -390 -780 0 780 390 260
-3/10 -2/5 -1/2 0/1 1/2 2/5 3/10 -234 -312 -390 0 390 312 234
-3/13 -2/8 -1/5 0/4 1/5 2/8 3/13 -180 -195 -156 0 156 195 180
-3/18 -2/13 -1/10 0/9 1/10 2/13 3/18 -130 -120 -78 0 78 120 130
weight = 24 weight = 18720
Gx(9) :
-4/32 -3/25 -2/20 -1/17 0/16 1/17 2/20 3/25 4/32 -16575 -15912 -13260 -7800 0 7800 13260 15912 16575
-4/25 -3/18 -2/13 -1/10 0/9 1/10 2/13 3/18 4/25 -21216 -22100 -20400 -13260 0 13260 20400 22100 21216
-4/20 -3/13 -2/8 -1/5 0/4 1/5 2/8 3/13 4/20 -26520 -30600 -33150 -26520 0 26520 33150 30600 26520
-4/17 -3/10 -2/5 -1/2 0/1 1/2 2/5 3/10 4/17 -31200 -39780 -53040 -66300 0 66300 53040 39780 31200
-4/16 -3/9 -2/4 -1/1 0 1/1 2/4 3/9 4/16 * 132600 = -33150 -44200 -66300 -132600 0 132600 66300 44200 33150
-4/17 -3/10 -2/5 -1/2 0/1 1/2 2/5 3/10 4/17 -31200 -39780 -53040 -66300 0 66300 53040 39780 31200
-4/20 -3/13 -2/8 -1/5 0/4 1/5 2/8 3/13 4/20 -26520 -30600 -33150 -26520 0 26520 33150 30600 26520
-4/25 -3/18 -2/13 -1/10 0/9 1/10 2/13 3/18 4/25 -21216 -22100 -20400 -13260 0 13260 20400 22100 21216
-4/32 -3/25 -2/20 -1/17 0/16 1/17 2/20 3/25 4/32 -16575 -15912 -13260 -7800 0 7800 13260 15912 16575
weight = 40 weight = 5304000
The Ruby program appended below will calculate Sobel filters and corresponding weights of any size, although the integer-valued filters are not likely to be useful for sizes larger than 15*15.
#!/usr/bin/ruby

# Sobel image gradient filter generator
# by <ian_bruce#mail.ru> -- Sept 2017
# reference:
# https://stackoverflow.com/questions/9567882/sobel-filter-kernel-of-large-size

if (s = ARGV[0].to_i) < 3 || (s % 2) == 0
    $stderr.puts "invalid size"
    exit false
end

s /= 2

n = 1
# find least-common-multiple of all fractional denominators
(0..s).each { |j|
    (1..s).each { |i|
        d = i*i + j*j
        n = n.lcm(d / d.gcd(i))
    }
}

fw1 = format("%d/%d", s, 2*s*s).size + 2
fw2 = format("%d", n).size + 2

weight = 0
s1 = ""
s2 = ""

(-s..s).each { |y|
    (-s..s).each { |x|
        i, j = x, y   # "i, j = y, x" for transpose
        d = i*i + j*j
        if (i != 0)
            if (n * i % d) != 0   # this should never happen
                $stderr.puts "inexact division: #{n} * #{i} / ((#{i})^2 + (#{j})^2)"
                exit false
            end
            w = n * i / d
            weight += i * w
        else
            w = 0
        end
        s1 += "%*s" % [fw1, d > 0 ? "%d/%d" % [i, d] : "0"]
        s2 += "%*d" % [fw2, w]
    }
    s1 += "\n" ; s2 += "\n"
}

f = n.gcd(weight)

puts s1
puts "\nweight = %d%s" % [weight/f, f < n ? "/%d" % (n/f) : ""]
puts "\n* #{n} =\n\n"
puts s2
puts "\nweight = #{weight}"
TL;DR: Use a Gaussian derivative operator instead.
As Adam Bowen explained in his answer, the Sobel kernel is a combination of a smoothing along one axis, and a central difference derivative along the other axis:
sob3x3 = [1 2 1]' * [1 0 -1]
The smoothing adds regularization (reduces sensitivity to noise).
(I'm leaving out all factors 1/8 in this post, as did Sobel himself, meaning that the operator determines the derivative up to scaling. Also, * always means convolution in this post.)
Let's generalize this:
deriv_kernel = smoothing_kernel * d/dx
One of the properties of the convolution is that
d/dx f = d/dx * f
That is, convolving an image with the elemental derivative operator yields the derivative of the image. Noting also that the convolution is commutative,
deriv_kernel = d/dx * smoothing_kernel = d/dx smoothing_kernel
That is, the derivative kernel is the derivative of a smoothing kernel.
Note that applying such a kernel to an image by convolution:
image * deriv_kernel = image * smoothing_kernel * d/dx = d/dx (image * smoothing_kernel)
That is, with this generalized, idealized derivative kernel we can compute the true derivative of the smoothed image. This is of course not the case with the Sobel kernel, as it uses a central difference approximation to the derivative.
But choosing a better smoothing_kernel, this can be achieved. The Gaussian kernel is the ideal option here, as it offers the best compromise between compactness in the spatial domain (small kernel footprint) with compactness in the frequency domain (good smoothing). Furthermore, the Gaussian is perfectly isotropic and separable. Using a Gaussian derivative kernel yields the best possible regularized derivative operator.
Thus, if you are looking for a larger Sobel operator, because you need more regularization, use a Gaussian derivative operator instead.
Let's analyze the Sobel kernel a little bit more.
The smoothing kernel is triangular, with samples [1 2 1]. This is a triangular function, which, sampled, leads to those three values:
    { 2 + x , if -2 < x < 0
h = { 2     , if x = 0
    { 2 - x , if 0 < x < 2
Its derivative is:
         {  1 , if -2 < x < 0
d/dx h = {  0 , if x = 0 (not really, but it's the sensible solution)
         { -1 , if 0 < x < 2
So, we can see that the central difference derivative approximation can be seen as a sampling of the analytical derivative of the same triangular function used for smoothing. Thus we have:
sob3x3 = [1 2 1]' * d/dx [1 2 1] = d/dx ( [1 2 1]' * [1 2 1] )
So, if you want to make this kernel larger, simply enlarge the smoothing kernel:
sob5x5 = d/dx ( [1 2 3 2 1]' * [1 2 3 2 1] ) = [1 2 3 2 1]' * [1 1 0 -1 -1]
sob7x7 = d/dx ( [1 2 3 4 3 2 1]' * [1 2 3 4 3 2 1] ) = [1 2 3 4 3 2 1]' * [1 1 1 0 -1 -1 -1]
This is quite different from the advice given by Adam Bowen, who suggests convolving the kernel with the 3-tap triangular kernel along each dimension: [1 2 1] * [1 2 1] = [1 4 6 4 1], and [1 2 1] * [1 0 -1] = [1 2 0 -2 -1]. Note that, due to the central limit theorem, convolving this triangular kernel with itself leads to a filter that approximates the Gaussian a little bit more. The larger the kernel we create by repeated convolutions with itself, the more we approximate this Gaussian. So, instead of using this method, you might as well directly sample the Gaussian function.
Daniel has a long post in which he suggests extending the Sobel kernel in yet another way. The shape of the smoothing kernel here diverges from the Gaussian approximation, I have not tried to study its properties.
Note that none of these three possible extensions of the Sobel kernel are actually Sobel kernels, since the Sobel kernel is explicitly a 3x3 kernel (see an historical note by Sobel about his operator, which he never actually published).
Note also that I'm not advocating the extended Sobel kernel derived here. Use Gaussian derivatives!
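To make that advice concrete, here is a small sketch (my own addition, not from the original answer) that samples a Gaussian and its analytical derivative to build a separable derivative operator:
import numpy as np

def gaussian_derivative_kernels(sigma):
    # Sample the Gaussian out to about 3 sigma.
    radius = int(np.ceil(3 * sigma))
    x = np.arange(-radius, radius + 1, dtype=np.float64)
    g = np.exp(-x**2 / (2 * sigma**2))
    g /= g.sum()              # normalized smoothing kernel
    dg = -x / sigma**2 * g    # sampled analytical derivative of the Gaussian
    return g, dg

# Separable use: smooth with g along one axis, differentiate with dg along
# the other, e.g. gx_kernel = np.outer(g, dg).
g, dg = gaussian_derivative_kernels(1.0)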
I quickly hacked an algorithm to generate a Sobel kernel of any odd size > 1, based on the examples given by @Paul R:
public static void CreateSobelKernel(int n, ref float[][] Kx, ref float[][] Ky)
{
    int side = n * 2 + 3;
    int halfSide = side / 2;
    for (int i = 0; i < side; i++)
    {
        int k = (i <= halfSide) ? (halfSide + i) : (side + halfSide - i - 1);
        for (int j = 0; j < side; j++)
        {
            if (j < halfSide)
                Kx[i][j] = Ky[j][i] = j - k;
            else if (j > halfSide)
                Kx[i][j] = Ky[j][i] = k - (side - j - 1);
            else
                Kx[i][j] = Ky[j][i] = 0;
        }
    }
}
Hope it helps.
Thanks all. I will try the second variant by @Adam Bowen; here is C# code for Sobel 5x5, 7x7, 9x9... matrix generation for this variant (maybe with bugs; if you find a bug or can optimize the code, write it here):
static void Main(string[] args)
{
    float[,] Sobel3x3 = new float[,] {
        {-1, 0, 1},
        {-2, 0, 2},
        {-1, 0, 1}};

    float[,] Sobel5x5 = Conv2DforSobelOperator(Sobel3x3);
    float[,] Sobel7x7 = Conv2DforSobelOperator(Sobel5x5);
    Console.ReadKey();
}

public static float[,] Conv2DforSobelOperator(float[,] Kernel)
{
    if (Kernel == null)
        throw new Exception("Kernel = null");

    if (Kernel.GetLength(0) != Kernel.GetLength(1))
        throw new Exception("Kernel matrix must be Square matrix!");

    float[,] BaseMatrix = new float[,] {
        {1, 2, 1},
        {2, 4, 2},
        {1, 2, 1}};

    int KernelSize = Kernel.GetLength(0);
    int HalfKernelSize = KernelSize / 2;
    int OutSize = KernelSize + 2;

    if ((KernelSize & 1) == 0) // Kernel_Size must be: 3, 5, 7, 9 ...
        throw new Exception("Kernel size must be odd (3x3, 5x5, 7x7...)");

    float[,] Out = new float[OutSize, OutSize];
    float[,] InMatrix = new float[OutSize, OutSize];

    for (int x = 0; x < BaseMatrix.GetLength(0); x++)
        for (int y = 0; y < BaseMatrix.GetLength(1); y++)
            InMatrix[HalfKernelSize + x, HalfKernelSize + y] = BaseMatrix[x, y];

    for (int x = 0; x < OutSize; x++)
        for (int y = 0; y < OutSize; y++)
            for (int Kx = 0; Kx < KernelSize; Kx++)
                for (int Ky = 0; Ky < KernelSize; Ky++)
                {
                    int X = x + Kx - HalfKernelSize;
                    int Y = y + Ky - HalfKernelSize;
                    if (X >= 0 && Y >= 0 && X < OutSize && Y < OutSize)
                        Out[x, y] += InMatrix[X, Y] * Kernel[KernelSize - 1 - Kx, KernelSize - 1 - Ky];
                }
    return Out;
}
Results (NormalMap), where this method is No. 2 and @Paul R's method is No. 1. Now I am using the latter, because it gives a smoother result and it's easy to generate the kernels with this code.
Matlab implementation of Daniel's answer:
kernel_width = 9;
halfway = floor(kernel_width/2);
step = -halfway : halfway;
i_matrix = repmat(step,[kernel_width 1]);
j_matrix = i_matrix';
gx = i_matrix ./ ( i_matrix.*i_matrix + j_matrix.*j_matrix );
gx(halfway+1,halfway+1) = 0; % deals with NaN in middle
gy = gx';
I made a Python NumPy implementation of Daniel's answer. It seems to be about 3x faster than Joao Ponte's implementation.
def calc_sobel_kernel(target_shape: tuple[int, int]):
    assert target_shape[0] % 2 != 0
    assert target_shape[1] % 2 != 0
    gx = np.zeros(target_shape, dtype=np.float32)
    gy = np.zeros(target_shape, dtype=np.float32)
    indices = np.indices(target_shape, dtype=np.float32)
    cols = indices[0] - target_shape[0] // 2
    rows = indices[1] - target_shape[1] // 2
    squared = cols ** 2 + rows ** 2
    np.divide(cols, squared, out=gy, where=squared != 0)
    np.divide(rows, squared, out=gx, where=squared != 0)
    return gx, gy
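For completeness, a quick usage sketch (my own addition; it assumes SciPy is available):
import numpy as np
from scipy.signal import convolve2d

gx, gy = calc_sobel_kernel((5, 5))
img = np.random.rand(64, 64).astype(np.float32)
dx = convolve2d(img, gx, mode='same', boundary='symm')  # x-derivative estimate
dy = convolve2d(img, gy, mode='same', boundary='symm')  # y-derivative estimate
magnitude = np.hypot(dx, dy)                            # gradient magnitude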
