LLVM IR - from IR Assembly Format to C - clang

I know you can use llvm-dis to get the textual assembly format (*.ll) from LLVM IR bitcode. Is there a way to get C back from the assembly format? Or at least to translate certain parts of it to C?
E.g., this code:
test.c
#define NMAX 9
#include <stdio.h>

typedef short Word16;

Word16 gmed_n(      /* o : the median value */
    Word16 ind[],   /* i : input values     */
    Word16 n        /* i : number of inputs */
)
{
    Word16 i, j, ix = 0;
    Word16 max;
    Word16 medianIndex;
    Word16 tmp[NMAX];
    Word16 tmp2[NMAX];

    for (i = 0; i < n; i++)
    {
        *(tmp2 + i) = *(ind + i);
    }
    for (i = 0; i < n; i++)
    {
        max = -32767;
        for (j = 0; j < n; j++)
        {
            if (*(tmp2 + j) >= max)
            {
                max = *(tmp2 + j);
                ix = j;
            }
        }
        *(tmp2 + ix) = -32768;
        *(tmp + i) = ix;
    }
    medianIndex = *(tmp + (n >> 1)); /* account for complex addressing */
    return (*(ind + medianIndex));
}

int main()
{
    short a[5] = { 1, 32767, 3, 4, 5 };
    int i = 0;
    short res;
    res = gmed_n(a, 5);
    printf("%i\n", res);
}
How can I translate this part of the .ll
85:                                               ; preds = %34
  %86 = getelementptr inbounds [9 x i16], [9 x i16]* %10, i64 0, i64 0 ; &tmp[0]
  %87 = load i16, i16* %4, align 2                                     ; n
  %88 = sext i16 %87 to i32
  %89 = ashr i32 %88, 1                                                ; n >> 1
  %90 = sext i32 %89 to i64
  %91 = getelementptr inbounds i16, i16* %86, i64 %90                  ; &tmp[n >> 1]
  %92 = load i16, i16* %91, align 2
  store i16 %92, i16* %9, align 2                                      ; medianIndex = tmp[n >> 1]
  %93 = load i16*, i16** %3, align 8                                   ; ind
  %94 = load i16, i16* %9, align 2                                     ; medianIndex
  %95 = sext i16 %94 to i32
  %96 = sext i32 %95 to i64
  %97 = getelementptr inbounds i16, i16* %93, i64 %96                  ; &ind[medianIndex]
  %98 = load i16, i16* %97, align 2
  ret i16 %98                                                          ; return ind[medianIndex]
To C:
return (*(ind + medianIndex));
How would you approach it? What tech stack?
I would like to pinpoint the originating C code from the LLVM IR assembly format.
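One possible approach, sketched here as an assumption rather than a confirmed answer: stock LLVM has no supported IR-to-C path (the old C backend was removed long ago; the out-of-tree llvm-cbe project resurrects it), but for pinpointing which C line an instruction came from, debug metadata is the usual tech stack. Recompile with clang -g -S -emit-llvm test.c so every instruction carries a !dbg attachment (the listing above was built without -g, so it has none), then walk the module with the LLVM C++ API and print each instruction's source location. The tool name ir2src and the error handling below are my own:

#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

// Usage: ./ir2src test.ll   (with test.ll built by: clang -g -S -emit-llvm test.c)
// Link against the LLVM libraries, e.g. via `llvm-config --cxxflags --ldflags --libs`.
int main(int argc, char **argv) {
    llvm::LLVMContext ctx;
    llvm::SMDiagnostic err;
    std::unique_ptr<llvm::Module> mod = llvm::parseIRFile(argv[1], err, ctx);
    if (!mod) {
        err.print(argv[0], llvm::errs());
        return 1;
    }
    for (llvm::Function &f : *mod)
        for (llvm::BasicBlock &bb : f)
            for (llvm::Instruction &inst : bb)
                if (llvm::DebugLoc loc = inst.getDebugLoc()) // present only with -g
                    llvm::errs() << inst << "\n    --> "
                                 << loc->getFilename() << ":" << loc.getLine()
                                 << ":" << loc.getCol() << "\n";
    return 0;
}

Run on a -g build of test.c above, the instructions of block %85 should map back to the lines of medianIndex = *(tmp + (n >> 1)); and the return statement.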

Related

What are my options to convert an OpenCV reduce loop to native iOS code? SIMD, anyone?

Which native iOS framework is best suited to eradicate this CPU hog written with OpenCV?
/// Reduce the channel elements of given Mat to a single channel
static func reduce(input: Mat) throws -> Mat {
    let output = Mat(rows: input.rows(), cols: input.cols(), type: CvType.CV_8UC1)
    for x in 0 ..< input.rows() {
        for y in 0 ..< input.cols() {
            let value = input.get(row: x, col: y)
            let dataValue = value.reduce(0, +)
            try output.put(row: x, col: y, data: [dataValue])
        }
    }
    return output
}
It takes about 20+ seconds to do those gets and puts on the real-world data I put this code through.
Assuming your input matrix is CV_64FC2, call the computeSumX2 C function below for each row.
Untested.
#include <arm_neon.h>
#include <stdint.h>
#include <stddef.h>

// Load 8 FP64 values, add pairwise, narrow uint64 to uint32, combine into a single vector
inline uint32x4_t reduce4( const double* rsi )
{
    // Load 8 values
    float64x2x4_t f64 = vld1q_f64_x4( rsi );
    // Add them pairwise
    float64x2_t f64_1 = vpaddq_f64( f64.val[ 0 ], f64.val[ 1 ] );
    float64x2_t f64_2 = vpaddq_f64( f64.val[ 2 ], f64.val[ 3 ] );
    // Convert FP64 to uint64
    uint64x2_t i64_1 = vcvtq_u64_f64( f64_1 );
    uint64x2_t i64_2 = vcvtq_u64_f64( f64_2 );
    // Narrow uint64 to uint32 into a single vector, using saturation
    uint32x2_t low = vqmovn_u64( i64_1 );
    return vqmovn_high_u64( low, i64_2 );
}

// Compute pairwise sum of FP64 values, cast to bytes
void computeSumX2( uint8_t* rdi, size_t length, const double* rsi )
{
    const double* const rsiEnd = rsi + length * 2;
    size_t lengthAligned = ( length / 16 ) * 16;
    const double* const rsiEndAligned = rsi + lengthAligned * 2;
    for( ; rsi < rsiEndAligned; rsi += 16 * 2, rdi += 16 )
    {
        // Each iteration of the loop loads 32 source values, stores 16 bytes
        uint16x4_t low16 = vqmovn_u32( reduce4( rsi ) );
        uint16x8_t u16 = vqmovn_high_u32( low16, reduce4( rsi + 8 ) );
        uint8x8_t low8 = vqmovn_u16( u16 );
        low16 = vqmovn_u32( reduce4( rsi + 8 * 2 ) );
        u16 = vqmovn_high_u32( low16, reduce4( rsi + 8 * 3 ) );
        uint8x16_t res = vqmovn_high_u16( low8, u16 );
        vst1q_u8( rdi, res );
    }
    for( ; rsi < rsiEnd; rsi += 2, rdi++ )
    {
        // Each iteration of the loop loads 2 source values, stores a single byte
        float64x2_t f64 = vld1q_f64( rsi );
        double sum = vaddvq_f64( f64 );
        *rdi = (uint8_t)sum;
    }
}
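For context, a hedged sketch of how the per-row calls might look from the C++ side; the wrapper name reduceWithNeon and the CV_64FC2 to CV_8UC1 layout assumptions are mine, not from the answer:

#include <opencv2/core.hpp>
#include <stdint.h>

// Sketch: reduce each CV_64FC2 row of `input` into a CV_8UC1 row via computeSumX2 above.
cv::Mat reduceWithNeon( const cv::Mat& input ) // assumes input.type() == CV_64FC2
{
    cv::Mat output( input.rows, input.cols, CV_8UC1 );
    for( int r = 0; r < input.rows; r++ )
        computeSumX2( output.ptr<uint8_t>( r ), (size_t)input.cols, input.ptr<double>( r ) );
    return output;
}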
For folks such as myself who have a poor comprehension of ARM intrinsics,
a simpler solution is to bridge into Objective-C code, as Soonts did,
and thus ditch the crude Swift API to OpenCV, bypassing the costly memory copying of all those gets and puts.
#include <assert.h>

void fasterSumX2( const char *input,
                  int rows,
                  int columns,
                  long step,
                  int channels,
                  char *output,
                  long output_step )
{
    for (int j = 0; j < rows; j++) {
        for (int i = 0; i < columns; i++) {
            long offset = step * j + i * channels;
            const unsigned char *ptr = (const unsigned char *)(input + offset);
            int res = ptr[0] + ptr[1];
            assert(res <= 255); // the two-channel sum must still fit in one byte
            *(output + output_step * j + i) = (char)res;
        }
    }
}
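For completeness, a hedged sketch of the call from the Objective-C++ side, handing fasterSumX2 the raw cv::Mat buffers; the CV_8UC2 input type and variable names are my assumptions:

// Sketch: bridge call with raw Mat buffers, avoiding per-pixel Swift get/put.
cv::Mat input;   // CV_8UC2, filled elsewhere
cv::Mat output(input.rows, input.cols, CV_8UC1);
fasterSumX2((const char *)input.data, input.rows, input.cols,
            (long)input.step, input.channels(),
            (char *)output.data, (long)output.step);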

Separable gaussian blur - optimize vertical pass

I have implemented a separable Gaussian blur. The horizontal pass was relatively easy to optimize with SIMD processing. However, I am not sure how to optimize the vertical pass.
Accessing the elements is not very cache friendly, and filling a SIMD lane would mean reading many different pixels. I was thinking about transposing the image, running the horizontal pass, and then transposing it back; however, I am not sure whether it would gain any improvement because of the two transpose operations.
My images are quite large (16k resolution) and the kernel size is 19, so the vectorization gain of the vertical pass was only about 15%.
My vertical pass is as follows (it is inside a generic class typed on T, which can be uint8_t or float):
int yStart = kernelHalfSize;
int xStart = kernelHalfSize;
int yEnd = input.GetWidth() - kernelHalfSize;
int xEnd = input.GetHeigh() - kernelHalfSize;

const T * inData = input.GetData().data();
V * outData = output.GetData().data();

int kn = kernelHalfSize * 2 + 1;
int kn4 = kn - kn % 4;

for (int y = yStart; y < yEnd; y++)
{
    size_t yW = size_t(y) * output.GetWidth();
    size_t outX = size_t(xStart) + yW;

    size_t xEndSimd = xStart;
    int len = xEnd - xStart;
    len = len - len % 4;
    xEndSimd = xStart + len;

    for (int x = xStart; x < xEndSimd; x += 4)
    {
        size_t inYW = size_t(y) * input.GetWidth();
        size_t x0 = ((x + 0) - kernelHalfSize) + inYW;
        size_t x1 = x0 + 1;
        size_t x2 = x0 + 2;
        size_t x3 = x0 + 3;

        __m128 sumDot = _mm_setzero_ps();

        int i = 0;
        for (; i < kn4; i += 4)
        {
            __m128 kx = _mm_set_ps1(kernelDataX[i + 0]);
            __m128 ky = _mm_set_ps1(kernelDataX[i + 1]);
            __m128 kz = _mm_set_ps1(kernelDataX[i + 2]);
            __m128 kw = _mm_set_ps1(kernelDataX[i + 3]);

            __m128 dx, dy, dz, dw;

            if constexpr (std::is_same<T, uint8_t>::value)
            {
                // we need to convert uint8_t inputs to float
                __m128i u8_0 = _mm_loadu_si128((const __m128i*)(inData + x0));
                __m128i u8_1 = _mm_loadu_si128((const __m128i*)(inData + x1));
                __m128i u8_2 = _mm_loadu_si128((const __m128i*)(inData + x2));
                __m128i u8_3 = _mm_loadu_si128((const __m128i*)(inData + x3));

                __m128i u32_0 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_0, _mm_setzero_si128()),
                    _mm_setzero_si128());
                __m128i u32_1 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_1, _mm_setzero_si128()),
                    _mm_setzero_si128());
                __m128i u32_2 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_2, _mm_setzero_si128()),
                    _mm_setzero_si128());
                __m128i u32_3 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_3, _mm_setzero_si128()),
                    _mm_setzero_si128());

                dx = _mm_cvtepi32_ps(u32_0);
                dy = _mm_cvtepi32_ps(u32_1);
                dz = _mm_cvtepi32_ps(u32_2);
                dw = _mm_cvtepi32_ps(u32_3);
            }
            else
            {
                /*
                //load 8 consecutive values
                auto dd = _mm256_loadu_ps(inData + x0);

                //extract parts by shifting and casting to 4 values float
                dx = _mm256_castps256_ps128(dd);
                dy = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 4, 3, 2, 1)));
                dz = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 5, 4, 3, 2)));
                dw = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 6, 5, 4, 3)));
                */

                dx = _mm_loadu_ps(inData + x0);
                dy = _mm_loadu_ps(inData + x1);
                dz = _mm_loadu_ps(inData + x2);
                dw = _mm_loadu_ps(inData + x3);
            }

            // calculate 4 dots at once
            // [dx, dy, dz, dw] <dot> [kx, ky, kz, kw]
            auto mx = _mm_mul_ps(dx, kx);        // dx * kx
            auto my = _mm_fmadd_ps(dy, ky, mx);  // mx + dy * ky
            auto mz = _mm_fmadd_ps(dz, kz, my);  // my + dz * kz
            auto res = _mm_fmadd_ps(dw, kw, mz); // mz + dw * kw

            sumDot = _mm_add_ps(sumDot, res);

            x0 += 4;
            x1 += 4;
            x2 += 4;
            x3 += 4;
        }

        for (; i < kn; i++)
        {
            auto v = _mm_set_ps1(kernelDataX[i]);
            auto v2 = _mm_set_ps(
                *(inData + x3), *(inData + x2),
                *(inData + x1), *(inData + x0)
            );

            sumDot = _mm_add_ps(sumDot, _mm_mul_ps(v, v2));

            x0++;
            x1++;
            x2++;
            x3++;
        }

        sumDot = _mm_mul_ps(sumDot, _mm_set_ps1(weightX));

        if constexpr (std::is_same<V, uint8_t>::value)
        {
            __m128i asInt = _mm_cvtps_epi32(sumDot);
            asInt = _mm_packus_epi32(asInt, asInt);
            asInt = _mm_packus_epi16(asInt, asInt);
            uint32_t res = _mm_cvtsi128_si32(asInt);
            ((uint32_t *)(outData + outX))[0] = res;
            outX += 4;
        }
        else
        {
            float tmpRes[4];
            _mm_store_ps(tmpRes, sumDot);
            outData[outX + 0] = tmpRes[0];
            outData[outX + 1] = tmpRes[1];
            outData[outX + 2] = tmpRes[2];
            outData[outX + 3] = tmpRes[3];
            outX += 4;
        }
    }

    for (int x = xEndSimd; x < xEnd; x++)
    {
        int kn = kernelHalfSize * 2 + 1;
        const T * v = input.GetPixelStart(x - kernelHalfSize, y);
        float tmp = 0;
        for (int i = 0; i < kn; i++)
        {
            tmp += kernelDataX[i] * v[i];
        }
        tmp *= weightX;
        outData[outX] = ImageUtils::clamp_cast<V>(tmp);
        outX++;
    }
}
There’s a well-known trick for that.
While you compute both passes, read the input sequentially and use SIMD to compute, but write the result into another buffer transposed, using scalar stores. Protip: SSE 4.1 has _mm_extract_ps, just don’t forget to cast your destination image pointer from float* to int*. Another thing about these stores: I would recommend _mm_stream_si32, as you want the maximum cache space used by your input data. When you compute the second pass, you’ll again be reading sequential memory addresses, and the prefetcher hardware will deal with the latency.
This way both passes are identical; I usually call the same function twice, with different buffers.
The two transposes caused by your two passes cancel each other out. Here’s an HLSL version, BTW.
There’s more. If your kernel size is only 19, it fits in 3 AVX registers. I think shuffle/permute/blend instructions are still faster than even L1 cache loads, i.e. it might be better to load the kernel outside the loop.
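A minimal sketch of the transposed scalar stores described above; the helper name and parameters are my own, and it assumes sumDot holds the four results for pixels x..x+3 of row y, with out being the transposed destination whose rows are outWidth floats long:

#include <smmintrin.h> // SSE 4.1
#include <stddef.h>

// Row y, columns x..x+3 of the pass output land in column y, rows x..x+3
// of the transposed buffer. _mm_extract_ps returns the lane's raw bits
// as an int, which is exactly what _mm_stream_si32 wants to store.
static inline void storeTransposed( float* out, size_t outWidth,
                                    size_t x, size_t y, __m128 sumDot )
{
    float* dst = out + x * outWidth + y;
    _mm_stream_si32( (int*)dst,                    _mm_extract_ps( sumDot, 0 ) );
    _mm_stream_si32( (int*)( dst + outWidth ),     _mm_extract_ps( sumDot, 1 ) );
    _mm_stream_si32( (int*)( dst + 2 * outWidth ), _mm_extract_ps( sumDot, 2 ) );
    _mm_stream_si32( (int*)( dst + 3 * outWidth ), _mm_extract_ps( sumDot, 3 ) );
}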

Convolution in Golang

I want to execute a convolution product on an image.
The original image is: [image]
So I tested the convolution with GIMP, using this matrix:
1 1 1
1 1 1
1 1 1
and the divisor 9. I obtain: [image]
When I execute my algorithm I obtain: [image]
My algorithm is:
func Convolution(img *image.Image, matrice [][]int) *image.NRGBA {
    imageRGBA := image.NewNRGBA((*img).Bounds())
    w := (*img).Bounds().Dx()
    h := (*img).Bounds().Dy()
    sumR := 0
    sumB := 0
    sumG := 0
    var r uint32
    var g uint32
    var b uint32
    for y := 0; y < h; y++ {
        for x := 0; x < w; x++ {
            for i := -1; i <= 1; i++ {
                for j := -1; j <= 1; j++ {
                    var imageX int
                    var imageY int
                    imageX = x + i
                    imageY = y + j
                    r, g, b, _ = (*img).At(imageX, imageY).RGBA()
                    sumG = (sumG + (int(g) * matrice[i+1][j+1]))
                    sumR = (sumR + (int(r) * matrice[i+1][j+1]))
                    sumB = (sumB + (int(b) * matrice[i+1][j+1]))
                }
            }
            imageRGBA.Set(x, y, color.NRGBA{
                uint8(min(sumR/9, 255)),
                uint8(min(sumG/9, 255)),
                uint8(min(sumB/9, 255)),
                255,
            })
            sumR = 0
            sumB = 0
            sumG = 0
        }
    }
    return imageRGBA
}
Where is the error?
Thank you for your help.
r, g, and b are uint32 values, and they contain 16 bits of color information each, which is always greater than 255 if it started as a non-zero 8-bit value (e.g., 8-bit 255 scales to 16-bit 65535).
You therefore can't operate on the RGBA values and truncate them to a uint8; that gives you a useless result, because the least significant bits are just fractional parts of the 8-bit values.
Compare the candidate value with the 16-bit maximum 65535 instead, and shift it right by 8 bits before truncating, to keep the 8 most significant bits:
uint8(min(sumR/9, 0xffff) >> 8),
uint8(min(sumG/9, 0xffff) >> 8),
uint8(min(sumB/9, 0xffff) >> 8),

Fast Pixel Count on Binary Image- ARM neon intrinsics - iOS Dev

Can someone tell me a fast function to count the number of white pixels in a binary image? I need it for iOS app development. I am working directly on the memory of the image, defined as:
bool *imageData = (bool *) malloc(noOfPixels * sizeof(bool));
I am implementing the function
int whiteCount = 0;
for (int q = i; q < i + windowHeight; q++)
{
    for (int w = j; w < j + windowWidth; w++)
    {
        if (imageData[q*W + w] == 1)
            whiteCount++;
    }
}
This is obviously the slowest function possible. I heard that ARM NEON intrinsics on iOS can be used to perform several operations in one cycle. Maybe that's the way to go?
The problem is that I am not very familiar with assembly language and don't have enough time to learn it at the moment. So it would be great if anyone could post NEON intrinsics code for the problem mentioned above, or any other fast implementation in C/C++.
The only NEON intrinsics code I was able to find online is the code for RGB to gray:
http://computer-vision-talks.com/2011/02/a-very-fast-bgra-to-grayscale-conversion-on-iphone/
Firstly you can speed up the original code a little by factoring out the multiply and getting rid of the branch:
int whiteCount = 0;
for (int q = i; q < i + windowHeight; q++)
{
    const bool * const row = &imageData[q * W];
    for (int w = j; w < j + windowWidth; w++)
    {
        whiteCount += row[w];
    }
}
(This assumes that imageData[] is truly binary, i.e. each element can only ever be 0 or 1.)
Here is a simple NEON implementation:
#include <arm_neon.h>

// ...

int q, w;
int whiteCount = 0;
uint32x4_t v_count = { 0 };
for (q = i; q < i + windowHeight; q++)
{
    const bool * const row = &imageData[q * W];
    uint16x8_t vrow_count = { 0 };
    for (w = j; w <= j + windowWidth - 16; w += 16)        // SIMD loop
    {
        uint8x16_t v = vld1q_u8((const uint8_t *)&row[w]); // load 16 x 8 bit pixels
        vrow_count = vpadalq_u8(vrow_count, v);            // accumulate 16 bit row counts
    }
    for ( ; w < j + windowWidth; ++w)                      // scalar clean up loop
    {
        whiteCount += row[w];
    }
    v_count = vpadalq_u16(v_count, vrow_count);            // update 32 bit image counts
}                                                          // from 16 bit row counts
// add 4 x 32 bit partial counts from SIMD loop to scalar total
whiteCount += vgetq_lane_u32(v_count, 0);
whiteCount += vgetq_lane_u32(v_count, 1);
whiteCount += vgetq_lane_u32(v_count, 2);
whiteCount += vgetq_lane_u32(v_count, 3);
// total is now in whiteCount
(This assumes that imageData[] is truly binary, imageWidth <= 2^19, and sizeof(bool) == 1.)
Updated version for unsigned char and values of 255 for white, 0 for black:
#include <arm_neon.h>

// ...

int q, w;
int whiteCount = 0;
const uint8x16_t v_mask = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
uint32x4_t v_count = { 0 };
for (q = i; q < i + windowHeight; q++)
{
    const uint8_t * const row = &imageData[q * W];
    uint16x8_t vrow_count = { 0 };
    for (w = j; w <= j + windowWidth - 16; w += 16) // SIMD loop
    {
        uint8x16_t v = vld1q_u8(&row[w]);           // load 16 x 8 bit pixels
        v = vandq_u8(v, v_mask);                    // mask out all but LS bit
        vrow_count = vpadalq_u8(vrow_count, v);     // accumulate 16 bit row counts
    }
    for ( ; w < j + windowWidth; ++w)               // scalar clean up loop
    {
        whiteCount += (row[w] == 255);
    }
    v_count = vpadalq_u16(v_count, vrow_count);     // update 32 bit image counts
}                                                   // from 16 bit row counts
// add 4 x 32 bit partial counts from SIMD loop to scalar total
whiteCount += vgetq_lane_u32(v_count, 0);
whiteCount += vgetq_lane_u32(v_count, 1);
whiteCount += vgetq_lane_u32(v_count, 2);
whiteCount += vgetq_lane_u32(v_count, 3);
// total is now in whiteCount
(This assumes that imageData[] has values of 255 for white and 0 for black, and imageWidth <= 2^19.)
Note that all the above code is untested and may need some further work.
http://gcc.gnu.org/onlinedocs/gcc/ARM-NEON-Intrinsics.html
Section 6.55.3.6
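As an aside (my addition, not from the answer above): on AArch64 the four vgetq_lane extractions at the end can be collapsed into a single horizontal add:

// AArch64 only: sum all four 32-bit lanes of v_count in one intrinsic.
whiteCount += (int)vaddvq_u32(v_count);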
The vectorized algorithm will do the comparisons and put them in a structure for you, but you'd still need to go through each element of the structure and determine whether it's zero or not.
How fast does that loop currently run, and how fast do you need it to run? Also remember that NEON works in the same registers as the floating point unit, so using NEON here may force an FPU context switch.

Specify an origin to warpPerspective() function in OpenCV 2.x

I am trying to specify a different origin for the warpPerspective() function than the default (0,0), in order to apply the transform independently of the support image size. I added a CvPoint parameter to the original code, but I can't find where to use these coordinates. I tried to use them in the computation of X0, Y0 and W0, but that only shifted the transformed image within the resulting image. Any idea?
Here is the code:
void warpPerspective( const Mat& src, Mat& dst, const Mat& M0, Size dsize,
                      int flags, int borderType, const Scalar& borderValue, CvPoint origin )
{
    dst.create( dsize, src.type() );

    const int BLOCK_SZ = 32;
    short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
    double M[9];
    Mat _M(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;

    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(_M, _M.type());
    if( !(flags & WARP_INVERSE_MAP) )
        invert(_M, _M);

    int x, y, x1, y1, width = dst.cols, height = dst.rows;

    int bh0 = std::min(BLOCK_SZ/2, height);
    int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
    bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);

    for( y = 0; y < height; y += bh0 )
    {
        for( x = 0; x < width; x += bw0 )
        {
            int bw = std::min( bw0, width - x);
            int bh = std::min( bh0, height - y);

            Mat _XY(bh, bw, CV_16SC2, XY), _A;
            Mat dpart(dst, Rect(x, y, bw, bh));

            for( y1 = 0; y1 < bh; y1++ )
            {
                short* xy = XY + y1*bw*2;
                double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                double W0 = M[6]*x + M[7]*(y + y1) + M[8];

                if( interpolation == INTER_NEAREST )
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? 1./W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)X;
                        xy[x1*2+1] = (short)Y;
                    }
                else
                {
                    short* alpha = A + y1*bw;
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? INTER_TAB_SIZE/W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)(X >> INTER_BITS);
                        xy[x1*2+1] = (short)(Y >> INTER_BITS);
                        alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                            (X & (INTER_TAB_SIZE-1)));
                    }
                }
            }

            if( interpolation == INTER_NEAREST )
                remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
            else
            {
                Mat _A(bh, bw, CV_16U, A);
                remap( src, dpart, _XY, _A, interpolation, borderType, borderValue );
            }
        }
    }
}
OK, I found it myself! You have two things to do:
compute the destination dimensions in the source referential, and do the remap using these dimensions;
increment the computed point coordinates.
Here is the code thus transformed:
void warpPerspective( const Mat& src, Mat& dst, const Mat& M0, Size dsize,
                      int flags, int borderType, const Scalar& borderValue, CvPoint origin )
{
    dst.create( dsize, src.type() );

    const int BLOCK_SZ = 32;
    short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
    double M[9];
    Mat _M(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;

    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(_M, _M.type());
    if( !(flags & WARP_INVERSE_MAP) )
        invert(_M, _M);

    int x, xDest, y, yDest, x1, y1, width = dst.cols, height = dst.rows;

    int bh0 = std::min(BLOCK_SZ/2, height);
    int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
    bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);

    for( y = -origin.y, yDest = 0; y < height; y += bh0, yDest += bh0 )
    {
        for( x = -origin.x, xDest = 0; x < width; x += bw0, xDest += bw0 )
        {
            int bw = std::min( bw0, width - x);
            int bh = std::min( bh0, height - y);

            // to avoid dimensions errors
            if (bw <= 0 || bh <= 0)
                break;

            Mat _XY(bh, bw, CV_16SC2, XY), _A;
            Mat dpart(dst, Rect(xDest, yDest, bw, bh));

            for( y1 = 0; y1 < bh; y1++ )
            {
                short* xy = XY + y1*bw*2;
                double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                double W0 = M[6]*x + M[7]*(y + y1) + M[8];

                if( interpolation == INTER_NEAREST )
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? 1./W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)X;
                        xy[x1*2+1] = (short)Y;
                    }
                else
                {
                    short* alpha = A + y1*bw;
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? INTER_TAB_SIZE/W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)(X >> INTER_BITS) + origin.x;
                        xy[x1*2+1] = (short)(Y >> INTER_BITS) + origin.y;
                        alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                            (X & (INTER_TAB_SIZE-1)));
                    }
                }
            }

            if( interpolation == INTER_NEAREST )
                remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
            else
            {
                Mat _A(bh, bw, CV_16U, A);
                remap( src, dpart, _XY, _A, interpolation, borderType, borderValue );
            }
        }
    }
}
with this function:
CvPoint transformPoint(const CvPoint pointToTransform, const CvMat* matrix) {
    double coordinates[3] = {pointToTransform.x, pointToTransform.y, 1};
    CvMat originVector = cvMat(3, 1, CV_64F, coordinates);
    CvMat transformedVector = cvMat(3, 1, CV_64F, coordinates);
    cvMatMul(matrix, &originVector, &transformedVector);
    CvPoint outputPoint = cvPoint(
        (int)(cvmGet(&transformedVector, 0, 0) / cvmGet(&transformedVector, 2, 0)),
        (int)(cvmGet(&transformedVector, 1, 0) / cvmGet(&transformedVector, 2, 0)));
    return outputPoint;
}
A much simpler and cleaner solution is to modify the perspective transformation itself. You can apply a translation which moves the origin to the desired position, then the perspective transformation, and finally the inverse translation.
Here is a small example program in Python which rotates an image by 45 degrees around the point (100, 100):
import cv2
import numpy as np

def translation_mat(dx, dy):
    return np.array([1, 0, dx, 0, 1, dy, 0, 0, 1]).reshape((3, 3))

def main():
    img = cv2.imread(r"pigeon.png", cv2.IMREAD_GRAYSCALE)

    # a simple rotation by 45 degrees
    rot = np.array([np.sin(np.pi/4), -np.cos(np.pi/4), 0,
                    np.cos(np.pi/4), np.sin(np.pi/4), 0,
                    0, 0, 1]).reshape((3, 3))
    t1 = translation_mat(-100, -100)
    t2 = translation_mat(100, 100)
    rot_shifted = t2.dot(rot.dot(t1))

    size = (img.shape[1], img.shape[0])
    img1 = cv2.warpPerspective(img, rot, size)
    img2 = cv2.warpPerspective(img, rot_shifted, size)

    cv2.imshow("Original image", img)
    cv2.imshow("Rotated around (0,0)", img1)
    cv2.imshow("Rotated around (100, 100)", img2)
    cv2.waitKey(0)

if __name__ == '__main__':
    main()
Note that you read the order of transformations from right to left:
rot_shifted = t2.dot(rot.dot(t1))
will apply t1 first, then rot, and then t2.
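In matrix form (my notation): rotating about a point p composes as M = T(p) · R · T(-p), where T(d) is the 3x3 translation matrix with last column (d_x, d_y, 1). T(-p) first moves p to the origin, R rotates about the origin, and T(p) moves it back; that is exactly t2.dot(rot.dot(t1)) above with p = (100, 100).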
For those of you looking for this piece in Python, here's a start. I'm not 100% sure it works, as I've stripped some optimizations from it. Also, there is an issue with linear interpolation; I simply didn't use it, but you might want to take a closer look if you do.
import cv2
import numpy as np

def warp_perspective(src, M, (width, height), (origin_x, origin_y),
                     flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,
                     borderValue=0, dst=None):
    """
    Implementation in Python using base code from
    http://stackoverflow.com/questions/4279008/specify-an-origin-to-warpperspective-function-in-opencv-2-x
    Note there is an issue with linear interpolation.
    """
    B_SIZE = 32

    if dst == None:
        dst = np.zeros((height, width, 3), dtype=src.dtype)

    # Set interpolation mode.
    interpolation = flags & cv2.INTER_MAX
    if interpolation == cv2.INTER_AREA:
        raise Exception('Area interpolation is not supported!')

    # Prepare matrix.
    M = M.astype(np.float64)
    if not(flags & cv2.WARP_INVERSE_MAP):
        M = cv2.invert(M)[1]
    M = M.flatten()

    x_dst = y_dst = 0
    for y in xrange(-origin_y, height, B_SIZE):
        for x in xrange(-origin_x, width, B_SIZE):
            print (x, y)

            # Block dimensions.
            bw = min(B_SIZE, width - x_dst)
            bh = min(B_SIZE, height - y_dst)

            # To avoid dimension errors.
            if bw <= 0 or bh <= 0:
                break

            # View of the destination array.
            dpart = dst[y_dst:y_dst+bh, x_dst:x_dst+bw]

            # Original code used a view of the array here, but we're using numpy arrays.
            XY = np.zeros((bh, bw, 2), dtype=np.int16)
            A = np.zeros((bh, bw), dtype=np.uint16)

            for y1 in xrange(bh):
                X0 = M[0]*x + M[1]*(y + y1) + M[2]
                Y0 = M[3]*x + M[4]*(y + y1) + M[5]
                W0 = M[6]*x + M[7]*(y + y1) + M[8]

                if interpolation == cv2.INTER_NEAREST:
                    for x1 in xrange(bw):
                        W = np.float64(W0 + M[6]*x1)
                        if W != 0:
                            W = np.float64(1.0)/W
                        X = np.int32((X0 + M[0]*x1)*W)
                        Y = np.int32((Y0 + M[3]*x1)*W)
                        XY[y1, x1][0] = np.int16(X)
                        XY[y1, x1][1] = np.int16(Y)
                else:
                    for x1 in xrange(bw):
                        W = np.float64(W0 + M[6]*x1)
                        if W != 0:
                            W = cv2.INTER_TAB_SIZE/W
                        X = np.int32((X0 + M[0]*x1)*W)
                        Y = np.int32((Y0 + M[3]*x1)*W)
                        XY[y1, x1][0] = np.int16((X >> cv2.INTER_BITS) + origin_x)
                        XY[y1, x1][1] = np.int16((Y >> cv2.INTER_BITS) + origin_y)
                        A[y1, x1] = np.int16(((Y & (cv2.INTER_TAB_SIZE-1))*cv2.INTER_TAB_SIZE +
                                              (X & (cv2.INTER_TAB_SIZE-1))))

            if interpolation == cv2.INTER_NEAREST:
                cv2.remap(src, XY, None, interpolation, dst=dpart,
                          borderMode=borderMode, borderValue=borderValue)
            else:
                cv2.remap(src, XY, A, interpolation, dst=dpart,
                          borderMode=borderMode, borderValue=borderValue)

            x_dst += B_SIZE
        x_dst = 0
        y_dst += B_SIZE

    return dst
