Empirical probability with boost::histogram

I have a boost::histogram with 100 bins over the range [-3.5, 3.5]. I fill it with a data vector qp. Since I work with periodic boundary conditions, all the q values in qp lie in [-3.5, 3.5].
auto h = boost::histogram::make_histogram(boost::histogram::axis::regular<>(100, -3.5, 3.5));
for (auto&& value : qp)
    h(value.first);
As a sanity check, I count all the points in the histogram with
int samplesize = 0;
for (auto&& it : indexed(h))
    samplesize += *it;
Then I prepare the data for the plot:
for (auto&& it : indexed(h)) {
    const auto bin = it.bin(0_c);
    double xaxis = bin.lower();
    double density = *it / samplesize;
    const std::pair<double, double> paar = std::make_pair(xaxis, density);
    printToStream(file, paar);
}
The result confuses me. It should be a normalized probability distribution, but it is definitely not (the values on the y-axis are way too low).
Is there a boost method with which I automatically get a (normalized) relative frequency?

You need to divide by the width of the bin as well, so modify the density calculation:
double density = *it / bin.width() / samplesize;
Here's the complete code in boost-histogram's Python bindings, since it was quicker to mock up:
import boost_histogram as bh
import numpy as np
import matplotlib.pyplot as plt
h = bh.Histogram(bh.axis.Regular(100, -3.5, 3.5))
data = np.concatenate([
    np.random.normal(-.75, .3, 1_000_000),
    np.random.normal(.75, .3, 750_000),
    np.random.normal(-1.5, .2, 200_000),
])
h.fill(data)
# Compute the areas of each bin (any number of axes)
areas = np.prod(h.axes.widths, axis=0)
# Compute the density (any number of axes)
density = h.view() / h.sum() / areas
plt.plot(h.axes.centers[0], density)
PS: You can use a circular axis if your data is circular.
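For example, a minimal sketch (assuming your boundaries really are periodic; circular is a keyword argument on boost-histogram's Regular axis):
import boost_histogram as bh

# a circular axis wraps out-of-range values back into [-3.5, 3.5),
# which matches periodic boundary conditions
h = bh.Histogram(bh.axis.Regular(100, -3.5, 3.5, circular=True))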


Detecting lines on test cassettes with OpenCV

(There is a solid line at C and a faint line at T.)
I want to detect the line at T. Currently I am using OpenCV to locate the QR code and rotate the image until the QR code is upright. Then I calculate the approximate locations of the C and T marks from the coordinates of the QR code, and my code scans down along the y axis looking for differences in the green and blue values.
My problem is that even a T line as faint as the one shown should be regarded as positive. How can I improve the detection?
I cropped out just the white strip, since I assume you already have a way of finding it. Since we're looking for red, I converted to the LAB colorspace and looked at the "a" channel.
Note: all images of the strip have been transposed (np.transpose) for viewing convenience; it's not that way in the code.
(image: the "a" channel)
I did a linear reframe to improve the contrast.
The image is super noisy. Again, I'm not sure if this is from the camera or the JPEG compression. I averaged each row to smooth out some of the nonsense.
I graphed the intensities (the x-values are the row indices).
Then I used a mean filter to smooth out the graph,
and ran a mountain-climber algorithm to look for peaks and valleys.
Then I filtered for peaks with a climb greater than 10 (the second-highest peak has a climb of 25.5, the third-highest 4.4).
Using these peaks we can determine that there are two lines, located (approximately) here:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# returns direction of gradient
# 1 if positive, -1 if negative, 0 if flat
def getDirection(one, two):
    dx = two - one
    if dx == 0:
        return 0
    if dx > 0:
        return 1
    return -1

# detects and returns peaks and valleys
def mountainClimber(vals, minClimb):
    # init trackers
    last_valley = vals[0]
    last_peak = vals[0]
    last_val = vals[0]
    last_dir = getDirection(vals[0], vals[1])

    # get climbing
    peak_valley = []  # index, height, climb (positive for peaks, negative for valleys)
    for a in range(1, len(vals)):
        # get current direction
        sign = getDirection(last_val, vals[a])
        last_val = vals[a]

        # if not equal, check gradient
        if sign != 0:
            if sign != last_dir:
                # change in gradient, record peak or valley
                if last_dir > 0:
                    # peak
                    last_peak = vals[a]
                    climb = last_peak - last_valley
                    climb = round(climb, 2)
                    peak_valley.append([a, vals[a], climb])
                else:
                    # valley
                    last_valley = vals[a]
                    climb = last_valley - last_peak
                    climb = round(climb, 2)
                    peak_valley.append([a, vals[a], climb])
            # change direction
            last_dir = sign

    # filter out very small climbs
    filtered_pv = []
    for dot in peak_valley:
        if abs(dot[2]) > minClimb:
            filtered_pv.append(dot)
    return filtered_pv

# runs a mean filter over the graph values
def meanFilter(vals, size):
    fil = []
    filtered_vals = []
    for val in vals:
        fil.append(val)
        # check if full
        if len(fil) >= size:
            # pop front
            fil = fil[1:]
            filtered_vals.append(sum(fil) / size)
    return filtered_vals

# averages each row (also gets graph values while we're here)
def smushRows(img):
    vals = []
    h, w = img.shape[:2]
    for y in range(h):
        ave = np.average(img[y, :])
        img[y, :] = ave
        vals.append(ave)
    return vals

# linear reframe [min1, max1] -> [min2, max2]
def reframe(img, min1, max1, min2, max2):
    copy = img.astype(np.float32)
    copy -= min1
    copy /= (max1 - min1)
    copy *= (max2 - min2)
    copy += min2
    return copy.astype(np.uint8)

# load image
img = cv2.imread("strip.png")

# resize
scale = 2
h, w = img.shape[:2]
h = int(h * scale)
w = int(w * scale)
img = cv2.resize(img, (w, h))

# lab colorspace
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)

# stretch contrast
low = np.min(a)
high = np.max(a)
a = reframe(a, low, high, 0, 255)

# smush and get graph values
vals = smushRows(a)

# filter and round values
mean_filter_size = 20
filtered_vals = meanFilter(vals, mean_filter_size)
for ind in range(len(filtered_vals)):
    filtered_vals[ind] = round(filtered_vals[ind], 2)

# get peaks and valleys
pv = mountainClimber(filtered_vals, 1)

# pull x and y values
pv_x = [ind[0] for ind in pv]
pv_y = [ind[1] for ind in pv]

# find big peaks
big_peaks = []
for dot in pv:
    if dot[2] > 10:  # climb filter size
        big_peaks.append(dot)
print(big_peaks)

# make plot points for the two best
tops_x = [dot[0] for dot in big_peaks]
tops_y = [dot[1] for dot in big_peaks]

# plot
x = [index for index in range(len(filtered_vals))]
fig, ax = plt.subplots()
ax.plot(x, filtered_vals)
ax.plot(pv_x, pv_y, 'og')
ax.plot(tops_x, tops_y, 'vr')
plt.show()

# draw on original image
h, w = img.shape[:2]
for dot in big_peaks:
    y = int(dot[0] + mean_filter_size / 2.0)  # adjust for the samples the mean filter cuts off
    cv2.line(img, (0, y), (w, y), (100, 200, 0), 2)

# show
cv2.imshow("a", a)
cv2.imshow("strip", img)
cv2.waitKey(0)
Edit:
I was wondering why the lines seemed so far off, and then I realized I had forgotten to account for the fact that meanFilter reduces the size of the list (it cuts samples from the front and back). I've updated the code to take that into account.
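If you'd rather avoid that bookkeeping entirely, a same-length moving average is one alternative; a minimal sketch with NumPy (not part of the code above, but it reuses vals and mean_filter_size from it):
import numpy as np

# mode='same' keeps the output the same length as the input;
# the edges are zero-padded, so the first/last few values are biased low
kernel = np.ones(mean_filter_size) / mean_filter_size
filtered_vals = np.convolve(vals, kernel, mode='same')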

Linear Regression - Implementing Feature Scaling

I was trying to implement linear regression in Octave 5.1.0 on a data set relating GRE scores to the probability of admission.
The data set looks like this:
337 0.92
324 0.76
316 0.72
322 0.8
. . .
My main Program.m file looks like this:
% read the data
data = load('Admission_Predict.txt');
% initiate variables
x = data(:,1);
y = data(:,2);
m = length(y);
theta = zeros(2,1);
alpha = 0.01;
iters = 1500;
J_hist = zeros(iters,1);
% plot data
subplot(1,2,1);
plot(x,y,'rx','MarkerSize', 10);
title('training data');
% compute cost function
x = [ones(m,1), (data(:,1) ./ 300)]; % feature scaling
J = computeCost(x,y,theta);
% run gradient descent
[theta, J_hist] = gradientDescent(x,y,theta,alpha,iters);
hold on;
subplot(1,2,1);
plot((x(:,2) .* 300), (x*theta),'-');
xlabel('GRE score');
ylabel('Probability');
hold off;
subplot (1,2,2);
plot(1:iters, J_hist, '-b');
xlabel('no: of iteration');
ylabel('Cost function');
computeCost.m looks like this:
function J = computeCost(x, y, theta)
    m = length(y);
    h = x * theta;
    J = (1 / (2 * m)) * sum((h - y) .^ 2);
endfunction
and gradientDescent.m looks like this:
function [theta, J_hist] = gradientDescent(x, y, theta, alpha, iters)
    m = length(y);
    J_hist = zeros(iters, 1);
    for i = 1:iters
        diff = (x * theta - y);
        theta = theta - (alpha * (1 / m)) * (x' * diff);
        J_hist(i) = computeCost(x, y, theta);
    endfor
endfunction
The plotted graphs then look like this,
which, as you can see, doesn't look right even though the cost function appears to be minimized.
Can someone please tell me if this is right? If not, what am I doing wrong?
The easiest way to check whether your implementation is correct is to compare it with a validated implementation of linear regression. I suggest using an alternative implementation approach like the one suggested here, and then comparing your results. If the fits match, then this is the best linear fit to your data; if they don't, then there may be something wrong in your implementation.
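For example, a quick cross-check with NumPy's least-squares polynomial fit (a sketch; it assumes the same Admission_Predict.txt file as in the question):
import numpy as np

# load the same two-column data set used in the Octave script
data = np.loadtxt('Admission_Predict.txt')
x, y = data[:, 0], data[:, 1]

# degree-1 least-squares fit: returns [slope, intercept]
slope, intercept = np.polyfit(x, y, 1)
print(slope, intercept)
Remember to undo the feature scaling before comparing: with x/300 as the feature, theta(2) should come out close to slope * 300 and theta(1) close to intercept.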

How do I 'fit a line' to a cluster of pixels?

I would like to generate a polynomial 'fit' to the cluster of colored pixels in the image here.
(The point being that I would like to measure how closely that cluster approximates a horizontal line.)
I thought of using grabit or something similar and then treating this as a cloud of points in a graph. But is there a quicker function to do so directly on the image file?
Thanks!
Here is a Python implementation. Basically we find all (xi, yi) coordinates of the colored regions, then set up a regularized least squares system where we want to find the vector of weights (w0, ..., wd) such that yi = w0 + w1 xi + w2 xi^2 + ... + wd xi^d "as close as possible" in the least squares sense.
import numpy as np
import matplotlib.pyplot as plt

def rgb2gray(rgb):
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])

def feature(x, order=3):
    """Generate polynomial features of the form
    [1, x, x^2, ..., x^order], where x is the column of x-coordinates
    and 1 is the column of ones for the intercept.
    """
    x = x.reshape(-1, 1)
    return np.power(x, np.arange(order + 1).reshape(1, -1))
I_orig = plt.imread("2Md7v.jpg")
# Convert to grayscale
I = rgb2gray(I_orig)
# Mask out region
mask = I > 20
# Get coordinates of pixels corresponding to marked region
X = np.argwhere(mask)
# Use the value as weights later
weights = I[mask] / float(I.max())
# Convert to diagonal matrix
W = np.diag(weights)
# Column indices
x = X[:, 1].reshape(-1, 1)
# Row indices to predict. Note origin is at top left corner
y = X[:, 0]
We want to find the vector w that minimizes || Aw - y ||^2,
so that we can use it to predict y at new x values.
Here are 2 versions. One is a vanilla least squares with l2 regularization and the other is weighted least squares with l2 regularization.
# Ridge regression, i.e., least squares with l2 regularization.
# Should probably use a more numerically stable implementation,
# e.g., that in Scikit-Learn
# alpha is regularization parameter. Larger alpha => less flexible curve
alpha = 0.01
# Construct data matrix, A
order = 3
A = feature(x, order)
# w = inv(A^T A + alpha * I) A^T y
w_unweighted = np.linalg.pinv(A.T.dot(A) + alpha * np.eye(A.shape[1])).dot(A.T).dot(y)

# w = inv(A^T W A + alpha * I) A^T W y
w_weighted = np.linalg.pinv(A.T.dot(W).dot(A) +
                            alpha * np.eye(A.shape[1])).dot(A.T).dot(W).dot(y)
The result
# Generate test points
n_samples = 50
x_test = np.linspace(0, I_orig.shape[1], n_samples)
X_test = feature(x_test, order)
# Predict y coordinates at test points
y_test_unweighted = X_test.dot(w_unweighted)
y_test_weighted = X_test.dot(w_weighted)
# Display
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.imshow(I_orig)
ax.plot(x_test, y_test_unweighted, color="green", marker='o', label="Unweighted")
ax.plot(x_test, y_test_weighted, color="blue", marker='x', label="Weighted")
fig.legend()
fig.savefig("curve.png")
For a simple straight-line fit, set the order argument of feature to 1. You can then use the gradient of the line to get a sense of how close it is to horizontal (e.g., by checking the angle of its slope).
You can also set this to any degree of polynomial you want. I find that degree 3 looks pretty good. In that case, six times the absolute value of the coefficient corresponding to x^3 (w_unweighted[3] or w_weighted[3]) is one measure of the curvature of the line.
See "A measure for the curvature of a quadratic polynomial in Matlab" for additional details.
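For instance, with order=1 the slope is just w[1], so a minimal sketch of the horizontalness check (assuming w_unweighted came from a degree-1 fit as above):
import numpy as np

# arctan of the slope gives the line's tilt; 0 degrees means perfectly horizontal
angle = np.degrees(np.arctan(w_unweighted[1]))
print("deviation from horizontal: %.2f degrees" % angle)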

iOS Accelerate low-pass FFT filter mirroring result

I am trying to port an existing FFT-based low-pass filter to iOS using the Accelerate vDSP framework.
It seems like the FFT works as expected for about the first 1/4 of the sample. But after that the results seem wrong and, even more oddly, are mirrored (the last half of the signal mirrors most of the first half).
You can see the results from a test application below. First the original sampled data is plotted, then an example of the expected filtered results (filtering out signal higher than 15Hz), then finally the results of my current FFT code (note that the desired results and the example FFT result are at a different scale than the original data):
The actual code for my low-pass filter is as follows:
double *lowpassFilterVector(double *accell, uint32_t sampleCount, double lowPassFreq, double sampleRate)
{
    double stride = 1;
    int ln = log2f(sampleCount);
    int n = 1 << ln;

    // So that we get an FFT of the whole data set, we pad out the array to the next highest power of 2.
    int fullPadN = n * 2;
    double *padAccell = malloc(sizeof(double) * fullPadN);
    memset(padAccell, 0, sizeof(double) * fullPadN);
    memcpy(padAccell, accell, sizeof(double) * sampleCount);

    ln = log2f(fullPadN);
    n = 1 << ln;
    int nOver2 = n / 2;

    DSPDoubleSplitComplex A;
    A.realp = (double *)malloc(sizeof(double) * nOver2);
    A.imagp = (double *)malloc(sizeof(double) * nOver2);

    // This can be reused, just including it here for simplicity.
    FFTSetupD setupReal = vDSP_create_fftsetupD(ln, FFT_RADIX2);
    vDSP_ctozD((DSPDoubleComplex *)padAccell, 2, &A, 1, nOver2);

    // Use the FFT to get frequency counts
    vDSP_fft_zripD(setupReal, &A, stride, ln, FFT_FORWARD);

    const double factor = 0.5f;
    vDSP_vsmulD(A.realp, 1, &factor, A.realp, 1, nOver2);
    vDSP_vsmulD(A.imagp, 1, &factor, A.imagp, 1, nOver2);
    A.realp[nOver2] = A.imagp[0];
    A.imagp[0] = 0.0f;
    A.imagp[nOver2] = 0.0f;

    // Set frequencies above target to 0.
    // This tells us which bin the frequencies over the minimum desired correspond to
    NSInteger binLocation = (lowPassFreq * n) / sampleRate;

    // We add 2 because bin 0 holds special FFT meta data, so bins really start at "1" -
    // and we want to filter out anything OVER the target frequency
    for (NSInteger i = binLocation + 2; i < nOver2; i++)
    {
        A.realp[i] = 0;
    }

    // Clear out all imaginary parts
    bzero(A.imagp, nOver2 * sizeof(double));
    //A.imagp[0] = A.realp[nOver2];

    // Now shift back all of the values
    vDSP_fft_zripD(setupReal, &A, stride, ln, FFT_INVERSE);

    double *filteredAccell = (double *)malloc(sizeof(double) * fullPadN);

    // Converts complex vector back into 2D array
    vDSP_ztocD(&A, stride, (DSPDoubleComplex *)filteredAccell, 2, nOver2);

    // Have to scale results to account for Apple's FFT library algorithm, see:
    // http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/vDSP_Programming_Guide/UsingFourierTransforms/UsingFourierTransforms.html#//apple_ref/doc/uid/TP40005147-CH202-15952
    double scale = (float)1.0f / fullPadN; //(2.0f * (float)n);
    vDSP_vsmulD(filteredAccell, 1, &scale, filteredAccell, 1, fullPadN);

    // Tracks results of conversion
    printf("\nInput & output:\n");
    for (int k = 0; k < sampleCount; k++)
    {
        printf("%3d\t%6.2f\t%6.2f\t%6.2f\n", k, accell[k], padAccell[k], filteredAccell[k]);
    }

    // Acceleration data will be replaced in-place.
    return filteredAccell;
}
In the original code the library handled input sizes that were not powers of two; in my Accelerate code I pad the input out to the nearest power of two. In the test below the original sample data is 1000 samples, so it's padded to 1024. I don't think that affects the results, but I mention it for the sake of possible differences.
If you want to experiment with a solution, you can download the sample project that generates the graphs here (in the FFTTest folder):
FFT Example Project code
Thanks for any insight; I've not worked with FFTs before, so I feel like I am missing something critical.
If you want a strictly real (not complex) result, then the data before the IFFT must be conjugate symmetric. If you don't want the result to be mirror symmetric, then don't zero the imaginary component before the IFFT. Merely zeroing bins before the IFFT creates a filter with a huge amount of ripple in the passband.
The Accelerate framework also supports more FFT lengths than just powers of 2.
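To see the principle outside of vDSP, here is a minimal NumPy sketch of the same low-pass idea: np.fft.rfft stores only the non-negative-frequency bins and np.fft.irfft enforces conjugate symmetry for you, so zeroing entire complex bins (rather than just their imaginary parts) yields a real, non-mirrored result:
import numpy as np

def lowpass_fft(signal, low_pass_freq, sample_rate):
    spectrum = np.fft.rfft(signal)                    # one-sided spectrum
    cutoff_bin = int(low_pass_freq * len(signal) / sample_rate)
    spectrum[cutoff_bin + 1:] = 0                     # zero whole complex bins above the cutoff
    return np.fft.irfft(spectrum, n=len(signal))      # real output, conjugate symmetry implied
Note that this brick-wall cutoff still produces the passband ripple mentioned above; tapering the bins near the cutoff with a window reduces it.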

Impulse, gaussian and salt and pepper noise with OpenCV

I'm studying image processing from the famous Gonzalez "Digital Image Processing", and in the chapters on image restoration a lot of examples use computer-generated noise (Gaussian, salt and pepper, etc.). In MATLAB there are some built-in functions to do this. What about OpenCV?
As far as I know there are no convenient built-in functions like in MATLAB, but with only a few lines of code you can create those images yourself.
For example, additive Gaussian noise:
Mat gaussian_noise = img.clone();
randn(gaussian_noise,128,30);
Salt and pepper noise:
Mat saltpepper_noise = Mat::zeros(img.rows, img.cols,CV_8U);
randu(saltpepper_noise,0,255);
Mat black = saltpepper_noise < 30;
Mat white = saltpepper_noise > 225;
Mat saltpepper_img = img.clone();
saltpepper_img.setTo(255,white);
saltpepper_img.setTo(0,black);
There is a function random_noise() in the scikit-image package. It has several built-in noise patterns, such as gaussian, s&p (salt and pepper), poisson and speckle.
Below I show an example of how to use this method:
from PIL import Image
import numpy as np
from skimage.util import random_noise

im = Image.open("test.jpg")
# convert PIL Image to ndarray
im_arr = np.asarray(im)

# random_noise() converts an image in [0, 255] to [0, 1.0];
# internally it uses np.random.normal() to create the normal distribution
# and adds the generated noise to the image
noise_img = random_noise(im_arr, mode='gaussian', var=0.05**2)
noise_img = (255 * noise_img).astype(np.uint8)

img = Image.fromarray(noise_img)
img.show()
There is also a package called imgaug which is dedicated to augmenting images in various ways. It provides Gaussian, Poisson and salt-and-pepper noise augmenters. Here is how you can use it to add noise to an image:
from PIL import Image
import numpy as np
from imgaug import augmenters as iaa

def main():
    im = Image.open("bg_img.jpg")
    im_arr = np.asarray(im)

    # gaussian noise
    # aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.1*255)

    # poisson noise
    # aug = iaa.AdditivePoissonNoise(lam=10.0, per_channel=True)

    # salt and pepper noise
    aug = iaa.SaltAndPepper(p=0.05)

    im_arr = aug.augment_image(im_arr)
    im = Image.fromarray(im_arr).convert('RGB')
    im.show()

if __name__ == "__main__":
    main()
A simple function to add Gaussian, salt-and-pepper, Poisson and speckle noise to an image:

Parameters
----------
image : ndarray
    Input image data. Will be converted to float.
mode : str
    One of the following strings, selecting the type of noise to add:
    'gauss'    Gaussian-distributed additive noise.
    'poisson'  Poisson-distributed noise generated from the data.
    's&p'      Replaces random pixels with 0 or 1.
    'speckle'  Multiplicative noise using out = image + n*image, where
               n is unit-variance Gaussian noise (np.random.randn).
import numpy as np
import os
import cv2

def noisy(noise_typ, image):
    if noise_typ == "gauss":
        row, col, ch = image.shape
        mean = 0
        #var = 0.1
        #sigma = var**0.5
        gauss = np.random.normal(mean, 1, (row, col, ch))
        gauss = gauss.reshape(row, col, ch)
        noisy = image + gauss
        return noisy
    elif noise_typ == "s&p":
        row, col, ch = image.shape
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(image)  # work on a copy so the input image is untouched
        # Salt mode
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = [np.random.randint(0, i - 1, int(num_salt))
                  for i in image.shape]
        out[tuple(coords)] = 1
        # Pepper mode
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = [np.random.randint(0, i - 1, int(num_pepper))
                  for i in image.shape]
        out[tuple(coords)] = 0
        return out
    elif noise_typ == "poisson":
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy = np.random.poisson(image * vals) / float(vals)
        return noisy
    elif noise_typ == "speckle":
        row, col, ch = image.shape
        gauss = np.random.randn(row, col, ch)
        gauss = gauss.reshape(row, col, ch)
        noisy = image + image * gauss
        return noisy
"Salt & Pepper" noise can be added in a quite simple fashion using NumPy matrix operations.
def add_salt_and_pepper(gb, prob):
'''Adds "Salt & Pepper" noise to an image.
gb: should be one-channel image with pixels in [0, 1] range
prob: probability (threshold) that controls level of noise'''
rnd = np.random.rand(gb.shape[0], gb.shape[1])
noisy = gb.copy()
noisy[rnd < prob] = 0
noisy[rnd > 1 - prob] = 1
return noisy
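A hypothetical usage, assuming a grayscale image scaled into [0, 1] (the file name is a placeholder):
import cv2

gb = cv2.imread("test.jpg", cv2.IMREAD_GRAYSCALE) / 255.0
noisy = add_salt_and_pepper(gb, 0.02)  # ~2% salt and ~2% pepper
cv2.imshow("noisy", noisy)
cv2.waitKey(0)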
# Adding noise to the image
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

img = cv2.imread('./fruit.png', 0)
im = np.zeros(img.shape, np.uint8)  # use a separate array so the original image is not overwritten
mean = 0
sigma = 10
cv2.randn(im, mean, sigma)  # fill with normally distributed random values
Fruit_Noise = cv2.add(img, im)  # add the noise to the original image
plt.imshow(Fruit_Noise, cmap='gray')
The values of mean and sigma can be altered to bring about a specific change in noise, like Gaussian or salt-and-pepper noise, etc.
You can use either randn or randu according to your needs. Have a look at the documentation: https://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#cv2.randu
I made some changes to @Shubham Pachori's code. When reading an image into a numpy array, the default dtype is uint8, which can cause wrapping when adding noise onto the image.
import numpy as np
from PIL import Image

"""
image: read through PIL.Image.open('path')
sigma: standard deviation of the gaussian noise
factor: the bigger this value is, the noisier the poisson_noised image

##IMPORTANT: when reading an image into a numpy array, the default dtype is uint8,
which can cause wrapping when adding noise onto the image.
E.g., example = np.array([128, 240, 255], dtype='uint8')
example + 50 = np.array([178, 34, 49], dtype='uint8')
Casting the np.array to dtype='int16' solves this problem.
"""

def gaussian_noise(image, sigma):
    img = np.array(image)
    noise = np.random.randn(img.shape[0], img.shape[1], img.shape[2])
    img = img.astype('int16')
    img_noise = img + noise * sigma
    img_noise = np.clip(img_noise, 0, 255)
    img_noise = img_noise.astype('uint8')
    return Image.fromarray(img_noise)

def poisson_noise(image, factor):
    factor = 1 / factor
    img = np.array(image)
    img = img.astype('int16')
    img_noise = np.random.poisson(img * factor) / float(factor)
    np.clip(img_noise, 0, 255, img_noise)
    img_noise = img_noise.astype('uint8')
    return Image.fromarray(img_noise)
http://scikit-image.org/docs/dev/api/skimage.util.html#skimage.util.random_noise
skimage.util.random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs)
# Adding salt & pepper noise
import numpy as np

m, n = img.shape
saltpepper_noise = np.random.rand(m, n)  # uniform random values in [0, 1)
for i in range(m):
    for j in range(n):
        if saltpepper_noise[i, j] <= 0.5:
            saltpepper_noise[i, j] = 0
        else:
            saltpepper_noise[i, j] = 255
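The double loop can also be vectorized if speed matters; a sketch of the same 50/50 thresholding in a single NumPy call:
import numpy as np

# every pixel of the mask becomes 0 or 255 with equal probability
saltpepper_noise = np.where(np.random.rand(m, n) <= 0.5, 0, 255).astype(np.uint8)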
import random

def add_salt_noise(src, ratio: float = 0.05, noise: list = [0, 0, 0]):
    dst = src.copy()
    shuffle_dict = {}
    i = 0
    while i < int(dst.shape[0] * dst.shape[1] * ratio):
        x, y = random.randint(0, dst.shape[0] - 1), random.randint(0, dst.shape[1] - 1)
        if (x, y) in shuffle_dict:
            continue
        else:
            dst[x, y] = noise
            shuffle_dict[(x, y)] = 0
            i += 1
    return dst
Although there is no built-in function like MATLAB's imnoise(image, noiseType, noiseLevel), we can easily add the required amount of random-valued impulse noise or salt-and-pepper noise to an image manually.
To add random-valued impulse noise:
import random as r

def addRvinGray(image, n):  # adds random-valued impulse noise in grayscale
    '''parameters:
    image: type=numpy array. input image in which you want to add noise.
    n: noise level (in percentage)'''
    k = 0  # counter variable
    ih = image.shape[0]
    iw = image.shape[1]
    noisypixels = (ih * iw * n) / 100  # the number of pixels to be altered
    for i in range(ih * iw):
        if k < noisypixels:
            # access a random pixel and give it a random intensity (0-255)
            image[r.randrange(0, ih)][r.randrange(0, iw)] = r.randrange(0, 256)
            k += 1
        else:
            break
    return image
To add salt-and-pepper noise:
def addSaltGray(image, n):  # adds salt-&-pepper noise in a grayscale image
    k = 0
    salt = True
    ih = image.shape[0]
    iw = image.shape[1]
    noisypixels = (ih * iw * n) / 100
    for i in range(ih * iw):
        if k < noisypixels:  # keep track of noise level
            if salt == True:
                image[r.randrange(0, ih)][r.randrange(0, iw)] = 255
                salt = False
            else:
                image[r.randrange(0, ih)][r.randrange(0, iw)] = 0
                salt = True
            k += 1
        else:
            break
    return image
Note: for color images, first split the image into three or four channels, depending on the input image, using the OpenCV function:
(B, G, R) = cv2.split(image)
(B, G, R, A) = cv2.split(image)
After splitting, perform the same operations on all channels.
At the end, merge all the channels:
merged = cv2.merge([B, G, R])
return merged
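Putting it together, a hypothetical end-to-end usage for a three-channel image (the file name is a placeholder):
import cv2

img = cv2.imread("test.jpg")
(B, G, R) = cv2.split(img)
# the grayscale helpers above modify each channel array in place
for channel in (B, G, R):
    addSaltGray(channel, 5)  # 5% salt-&-pepper noise per channel
merged = cv2.merge([B, G, R])
cv2.imwrite("test_noisy.jpg", merged)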
