As we know, x86-64 uses the registers rdi, rsi, rdx, rcx, r8, and r9 to pass a normal function's integer arguments, uses stack memory for large arguments, and uses xmm registers for float and double arguments. But in my code, the function 'myuprobe_sum_dww_ptr' is weird: it doesn't use rdi for the first argument but for a local variable. Please see the code below; I have commented the register usage in it. Could anyone help explain?
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>
struct double_wraper
{
double data;
};
struct double_wraper_wraper
{
double_wraper_wraper()
{
dp = new double_wraper();
}
~double_wraper_wraper()
{
delete dp;
}
uint32_t a;
struct double_wraper d;
struct double_wraper* dp;
};
// a -> rdi, b -> rsi
uint32_t myuprobe_sum_int(uint32_t a, uint32_t b)
{
//printf("%u + %u = %u, addr = %p\n", a, b, &a);
return a + b;
}
// d1 -> rsi, d2 -> rdx, v -> rcx
double_wraper_wraper myuprobe_sum_dww_ptr(const double_wraper_wraper* d1, const double_wraper_wraper* d2, uint32_t v)
{
double_wraper_wraper d3; // d3 -> rdi, why??????????????
d3.a = d1->a + d2->a + v;
d3.d.data = d1->d.data + d2->d.data;
d3.dp->data = d1->dp->data + d2->dp->data;
//printf("%u + %u = %u, addr = %p\n", d1->a, d2->a, d3.a, d1);
//printf("%lf + %lf = %lf, addr = %p\n", d1->d.data, d2->d.data, d3.d.data, d1);
printf("%lf + %lf = %lf, addr = %p\n", d1->dp->data, d2->dp->data, d3.dp->data, d1);
return d3;
}
// d1 -> rdi, d2 -> rsi, d3 -> rdx, v -> rcx
double_wraper myuprobe_sum_dw_ptr(const double_wraper* d1, const double_wraper* d2, const double_wraper_wraper* d3, uint32_t v)
{
double_wraper d4;
d4.data = d1->data + d2->data + d3->d.data;
//printf("%lf + %lf + %lf = %lf, addr = %p\n", d1->data, d2->data, d3->d.data, d4.data, d1);
return d4;
}
int main()
{
while(1) {
double_wraper_wraper d4, d5;
d4.a = rand();
d4.d.data = rand() + (double)rand() / RAND_MAX;
d4.dp->data = rand() + (double)rand() / RAND_MAX;
d5.a = rand();
d5.d.data = rand() + (double)rand() / RAND_MAX;
d5.dp->data = rand() + (double)rand() / RAND_MAX;
auto d7 = myuprobe_sum_dww_ptr(&d4, &d5, 100);
uint32_t a,b;
a = rand();
b = rand();
auto c = myuprobe_sum_int(a, b);
double_wraper d8,d9;
d8.data = rand() + (double)rand() / RAND_MAX;
d9.data = rand() + (double)rand() / RAND_MAX;
auto d10 = myuprobe_sum_dw_ptr(&d8, &d9, &d4, 100);
usleep(5000000);
}
}
Compile: g++ test.cpp -o test -g -O0
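This is expected behavior under the System V AMD64 ABI that g++ follows on x86-64. myuprobe_sum_dww_ptr returns a double_wraper_wraper by value; that type is 24 bytes and has a user-defined constructor and destructor, so the ABI returns it in memory: the caller allocates the return slot and passes its address as a hidden first argument in rdi. With the named return value optimization, d3 is constructed directly in that slot, so rdi effectively holds the address of d3, and the explicit arguments shift to rsi/rdx/rcx, exactly as your comments show. By contrast, double_wraper is a trivially copyable struct holding a single double, so myuprobe_sum_dw_ptr returns it in xmm0 and its arguments stay in rdi/rsi/rdx/rcx. A sketch of the signature the compiler effectively lowers myuprobe_sum_dww_ptr to (illustration only, not real source):
void myuprobe_sum_dww_ptr_lowered(
    double_wraper_wraper* ret,      // rdi: hidden pointer to the caller's return slot (d3 is built here)
    const double_wraper_wraper* d1, // rsi
    const double_wraper_wraper* d2, // rdx
    uint32_t v);                    // rcx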
I use pthreads on Ubuntu to implement multithreaded matrix-vector multiplication, but at runtime it reports a segmentation fault
#pragma comment(lib, "pthreadVC2.lib")
#define _CRT_SECURE_NO_WARNINGS 1
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
/* Global variables */
int thread_count = 8;
int m, n;
double* A = NULL;
double* x = NULL;
double* y = NULL;
/* Serial functions */
void Usage(char* prog_name);
void Read_matrix(char* prompt, double A[], int m, int n);
void Read_vector(char* prompt, double x[], int n);
void Print_matrix(char* title, double A[], int m, int n);
void Print_vector(char* title, double y[], double m);
/* Parallel function */
void* Pth_mat_vect(void* rank);
/*------------------------------------------------------------------*/
int main(int argc, char* argv[]) {
long thread;
pthread_t* thread_handles;
thread_count = atoi(argv[1]);
thread_handles = malloc(thread_count * sizeof(pthread_t));
printf("Enter m and n\n");
scanf("%d%d", &m, &n);
A = malloc(m * n * sizeof(double));
x = malloc(n * sizeof(double));
y = malloc(m * sizeof(double));
Read_matrix("Enter the matrix", A, m, n);
Print_matrix("We read", A, m, n);
Read_vector("Enter the vector", x, n);
Print_vector("We read", x, n);
for (thread = 0; thread < thread_count; thread++)
pthread_create(&thread_handles[thread], NULL,
Pth_mat_vect, (void*)thread);
for (thread = 0; thread < thread_count; thread++)
pthread_join(thread_handles[thread], NULL);
Print_vector("The product is", y, m);
free(A);
free(x);
free(y);
return 0;
} /* main */
/*------------------------------------------------------------------
* Function: Read_matrix
* Purpose: Read in the matrix
* In args: prompt, m, n
* Out arg: A
*/
void Read_matrix(char* prompt, double A[], int m, int n) {
int i, j;
printf("%s\n", prompt);
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
scanf("%lf", &A[i * n + j]);
} /* Read_matrix */
/*------------------------------------------------------------------
* Function: Read_vector
* Purpose: Read in the vector x
* In arg: prompt, n
* Out arg: x
*/
void Read_vector(char* prompt, double x[], int n) {
int i;
printf("%s\n", prompt);
for (i = 0; i < n; i++)
scanf("%lf", &x[i]);
} /* Read_vector */
/*------------------------------------------------------------------
* Function: Pth_mat_vect
* Purpose: Multiply an mxn matrix by an nx1 column vector
* In arg: rank
* Global in vars: A, x, m, n, thread_count
* Global out var: y
*/
void* Pth_mat_vect(void* rank) {
long my_rank = (long)rank;
int i, j;
int local_m = m / thread_count;
int my_first_row = my_rank * local_m;
int my_last_row = (my_rank + 1) * local_m - 1;
for (i = my_first_row; i <= my_last_row; i++) {
y[i] = 0.0;
for (j = 0; j < n; j++)
y[i] += A[i * n + j] * x[j];
}
return NULL;
} /* Pth_mat_vect */
/*------------------------------------------------------------------
* Function: Print_matrix
* Purpose: Print the matrix
* In args: title, A, m, n
*/
void Print_matrix(char* title, double A[], int m, int n) {
int i, j;
printf("%s\n", title);
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++)
printf("%4.1f ", A[i * n + j]);
printf("\n");
}
} /* Print_matrix */
/*------------------------------------------------------------------
* Function: Print_vector
* Purpose: Print a vector
* In args: title, y, m
*/
void Print_vector(char* title, double y[], double m) {
int i;
printf("%s\n", title);
for (i = 0; i < m; i++)
printf("%4.1f ", y[i]);
printf("\n");
} /* Print_vector */
This code is from An Introduction to Parallel Programming
I know this error seems to be memory-related; in fact, the program crashes without ever entering main(). I tried some other people's methods, but none of them worked.
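One thing to check first (a guess, since the crash reportedly happens before any output appears): main() dereferences argv[1] unconditionally, so launching the program without a thread-count argument makes atoi(argv[1]) read a null pointer and segfault immediately, which can easily look like the program never entered main(). A minimal guard, assuming the program is launched as ./a.out <thread_count>:
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[]) {
    /* Validate argv[1] before using it; without this, running the
       program with no argument dereferences a null pointer. */
    if (argc < 2) {
        fprintf(stderr, "usage: %s <thread_count>\n", argv[0]);
        return 1;
    }
    int thread_count = atoi(argv[1]);
    printf("thread_count = %d\n", thread_count);
    return 0;
}
Also note that #pragma comment(lib, "pthreadVC2.lib") is an MSVC-only directive; on Ubuntu it is ignored, and you link with -lpthread instead.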
Which native iOS framework is best used to eradicate this CPU hog written in OpenCV?
/// Reduce the channel elements of given Mat to a single channel
static func reduce(input: Mat) throws -> Mat {
let output = Mat(rows: input.rows(), cols: input.cols(), type: CvType.CV_8UC1)
for x in 0 ..< input.rows() {
for y in 0 ..< input.cols() {
let value = input.get(row: x, col: y)
let dataValue = value.reduce(0, +)
try output.put(row: x, col: y, data: [dataValue])
}
}
return output
}
It takes about 20+ seconds to do those gets and puts on the real-world data I put this code through.
Assuming your input matrix is CV_64FC2, call the computeSumX2 C function below for each row.
Untested.
#include <arm_neon.h>
#include <stdint.h>
#include <stddef.h>
// Load 8 FP64 values, add pairwise, narrow uint64 to uint32, combine into a single vector
inline uint32x4_t reduce4( const double* rsi )
{
// Load 8 values
float64x2x4_t f64 = vld1q_f64_x4( rsi );
// Add them pairwise
float64x2_t f64_1 = vpaddq_f64( f64.val[ 0 ], f64.val[ 1 ] );
float64x2_t f64_2 = vpaddq_f64( f64.val[ 2 ], f64.val[ 3 ] );
// Convert FP64 to uint64
uint64x2_t i64_1 = vcvtq_u64_f64( f64_1 );
uint64x2_t i64_2 = vcvtq_u64_f64( f64_2 );
// Convert int64 to int32 in a single vector, using saturation
uint32x2_t low = vqmovn_u64( i64_1 );
return vqmovn_high_u64( low, i64_2 );
}
// Compute pairwise sum of FP64 values, cast to bytes
void computeSumX2( uint8_t* rdi, size_t length, const double* rsi )
{
const double* const rsiEnd = rsi + length * 2;
size_t lengthAligned = ( length / 16 ) * 16;
const double* const rsiEndAligned = rsi + lengthAligned * 2;
for( ; rsi < rsiEndAligned; rsi += 16 * 2, rdi += 16 )
{
// Each iteration of the loop loads 32 source values, stores 16 bytes
uint16x4_t low16 = vqmovn_u32( reduce4( rsi ) );
uint16x8_t u16 = vqmovn_high_u32( low16, reduce4( rsi + 8 ) );
uint8x8_t low8 = vqmovn_u16( u16 );
low16 = vqmovn_u32( reduce4( rsi + 8 * 2 ) );
u16 = vqmovn_high_u32( low16, reduce4( rsi + 8 * 3 ) );
uint8x16_t res = vqmovn_high_u16( low8, u16 );
vst1q_u8( rdi, res );
}
for( ; rsi < rsiEnd; rsi += 2, rdi++ )
{
// Each iteration of the loop loads 2 source values, stores a single byte
float64x2_t f64 = vld1q_f64( rsi );
double sum = vaddvq_f64( f64 );
*rdi = (uint8_t)sum;
}
}
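For reference, the per-row wiring on the OpenCV side could look like this (a sketch, assuming the input really is CV_64FC2 and the output is CV_8UC1):
#include <opencv2/core.hpp>
#include <cstdint>
cv::Mat reduceFast(const cv::Mat& src) // src: CV_64FC2
{
    cv::Mat dst(src.rows, src.cols, CV_8UC1);
    // Each row is contiguous, so hand the raw row pointers to computeSumX2
    for (int y = 0; y < src.rows; y++)
        computeSumX2(dst.ptr<uint8_t>(y), (size_t)src.cols, src.ptr<double>(y));
    return dst;
}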
For folks such as myself who have a poor comprehension of ARM intrinsics, a simpler solution is to bridge into Objective-C code, as Soonts did, and thus ditch the crude Swift API to OpenCV, bypassing the costly memory copying of those gets and puts.
void fasterSumX2( const char *input,
int rows,
int columns,
long step,
int channels,
char* output,
long output_step
)
{
for(int j = 0;j < rows;j++){
for(int i = 0;i < columns;i++){
long offset = step * j + i * channels;
const unsigned char *ptr = (const unsigned char *)(input + offset);
int res = ptr[0]+ptr[1];
if (res > 0) {
if (res > 255) {
assert(false);
}
}
*(output + output_step * j + i) = res;
}
}
}
I have implemented a separable Gaussian blur. The horizontal pass was relatively easy to optimize with SIMD processing. However, I am not sure how to optimize the vertical pass.
Accessing elements is not very cache friendly, and filling a SIMD lane would mean reading many different pixels. I was thinking about transposing the image, running the horizontal pass, and then transposing the image back; however, I am not sure it would gain any improvement because of the two transpose operations.
I have quite large images (16k resolution) and the kernel size is 19, so the vectorization gain for the vertical pass was only about 15%.
My vertical pass is as follows (it is inside a generic class typed on T, which can be uint8_t or float):
int yStart = kernelHalfSize;
int xStart = kernelHalfSize;
int yEnd = input.GetWidth() - kernelHalfSize;
int xEnd = input.GetHeigh() - kernelHalfSize;
const T * inData = input.GetData().data();
V * outData = output.GetData().data();
int kn = kernelHalfSize * 2 + 1;
int kn4 = kn - kn % 4;
for (int y = yStart; y < yEnd; y++)
{
size_t yW = size_t(y) * output.GetWidth();
size_t outX = size_t(xStart) + yW;
size_t xEndSimd = xStart;
int len = xEnd - xStart;
len = len - len % 4;
xEndSimd = xStart + len;
for (int x = xStart; x < xEndSimd; x += 4)
{
size_t inYW = size_t(y) * input.GetWidth();
size_t x0 = ((x + 0) - kernelHalfSize) + inYW;
size_t x1 = x0 + 1;
size_t x2 = x0 + 2;
size_t x3 = x0 + 3;
__m128 sumDot = _mm_setzero_ps();
int i = 0;
for (; i < kn4; i += 4)
{
__m128 kx = _mm_set_ps1(kernelDataX[i + 0]);
__m128 ky = _mm_set_ps1(kernelDataX[i + 1]);
__m128 kz = _mm_set_ps1(kernelDataX[i + 2]);
__m128 kw = _mm_set_ps1(kernelDataX[i + 3]);
__m128 dx, dy, dz, dw;
if constexpr (std::is_same<T, uint8_t>::value)
{
//we need to convert uint8_t inputs to float
__m128i u8_0 = _mm_loadu_si128((const __m128i*)(inData + x0));
__m128i u8_1 = _mm_loadu_si128((const __m128i*)(inData + x1));
__m128i u8_2 = _mm_loadu_si128((const __m128i*)(inData + x2));
__m128i u8_3 = _mm_loadu_si128((const __m128i*)(inData + x3));
__m128i u32_0 = _mm_unpacklo_epi16(
_mm_unpacklo_epi8(u8_0, _mm_setzero_si128()),
_mm_setzero_si128());
__m128i u32_1 = _mm_unpacklo_epi16(
_mm_unpacklo_epi8(u8_1, _mm_setzero_si128()),
_mm_setzero_si128());
__m128i u32_2 = _mm_unpacklo_epi16(
_mm_unpacklo_epi8(u8_2, _mm_setzero_si128()),
_mm_setzero_si128());
__m128i u32_3 = _mm_unpacklo_epi16(
_mm_unpacklo_epi8(u8_3, _mm_setzero_si128()),
_mm_setzero_si128());
dx = _mm_cvtepi32_ps(u32_0);
dy = _mm_cvtepi32_ps(u32_1);
dz = _mm_cvtepi32_ps(u32_2);
dw = _mm_cvtepi32_ps(u32_3);
}
else
{
/*
//load 8 consecutive values
auto dd = _mm256_loadu_ps(inData + x0);
//extract parts by shifting and casting to 4 values float
dx = _mm256_castps256_ps128(dd);
dy = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 4, 3, 2, 1)));
dz = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 5, 4, 3, 2)));
dw = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 6, 5, 4, 3)));
*/
dx = _mm_loadu_ps(inData + x0);
dy = _mm_loadu_ps(inData + x1);
dz = _mm_loadu_ps(inData + x2);
dw = _mm_loadu_ps(inData + x3);
}
//calculate 4 dots at once
//[dx, dy, dz, dw] <dot> [kx, ky, kz, kw]
auto mx = _mm_mul_ps(dx, kx); //dx * kx
auto my = _mm_fmadd_ps(dy, ky, mx); //mx + dy * ky
auto mz = _mm_fmadd_ps(dz, kz, my); //my + dz * kz
auto res = _mm_fmadd_ps(dw, kw, mz); //mz + dw * kw
sumDot = _mm_add_ps(sumDot, res);
x0 += 4;
x1 += 4;
x2 += 4;
x3 += 4;
}
for (; i < kn; i++)
{
auto v = _mm_set_ps1(kernelDataX[i]);
auto v2 = _mm_set_ps(
*(inData + x3), *(inData + x2),
*(inData + x1), *(inData + x0)
);
sumDot = _mm_add_ps(sumDot, _mm_mul_ps(v, v2));
x0++;
x1++;
x2++;
x3++;
}
sumDot = _mm_mul_ps(sumDot, _mm_set_ps1(weightX));
if constexpr (std::is_same<V, uint8_t>::value)
{
__m128i asInt = _mm_cvtps_epi32(sumDot);
asInt = _mm_packus_epi32(asInt, asInt);
asInt = _mm_packus_epi16(asInt, asInt);
uint32_t res = _mm_cvtsi128_si32(asInt);
((uint32_t *)(outData + outX))[0] = res;
outX += 4;
}
else
{
float tmpRes[4];
_mm_store_ps(tmpRes, sumDot);
outData[outX + 0] = tmpRes[0];
outData[outX + 1] = tmpRes[1];
outData[outX + 2] = tmpRes[2];
outData[outX + 3] = tmpRes[3];
outX += 4;
}
}
for (int x = xEndSimd; x < xEnd; x++)
{
int kn = kernelHalfSize * 2 + 1;
const T * v = input.GetPixelStart(x - kernelHalfSize, y);
float tmp = 0;
for (int i = 0; i < kn; i++)
{
tmp += kernelDataX[i] * v[i];
}
tmp *= weightX;
outData[outX] = ImageUtils::clamp_cast<V>(tmp);
outX++;
}
}
There’s a well-known trick for that.
While you compute both passes, read them sequentially and use SIMD to compute, but write the result into another buffer, transposed, using scalar stores. Protip: SSE 4.1 has _mm_extract_ps; just don't forget to cast your destination image pointer from float* to int*. Another thing about these stores: I would recommend using _mm_stream_si32, as you want the cache space used mostly by your input data. When you're computing the second pass, you'll be reading sequential memory addresses again, and the prefetcher hardware will deal with the latency.
This way both passes will be identical, I usually call same function twice, with different buffers.
Two transposes caused by your 2 passes cancel each other. Here’s an HLSL version, BTW.
There’s more. If your kernel size is only 19, that fits in 3 AVX registers. I think shuffle/permute/blend instructions are still faster than even L1 cache loads, i.e. it might be better to load the kernel outside the loop.
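To make the store part of the trick concrete, here is a sketch (my names, not from the answer above): sumDot holds the four results for pixels (x..x+3, y), outT is the transposed destination buffer, and outHeight is its row stride in floats.
#include <immintrin.h>
inline void storeTransposed(__m128 sumDot, float* outT, size_t outHeight,
                            size_t x, size_t y)
{
    // In the transposed image, output pixel (x + i, y) lives at
    // outT[(x + i) * outHeight + y]; scatter with scalar streaming stores.
    int* dst = (int*)(outT + x * outHeight + y);
    _mm_stream_si32(dst + 0 * outHeight, _mm_extract_ps(sumDot, 0));
    _mm_stream_si32(dst + 1 * outHeight, _mm_extract_ps(sumDot, 1));
    _mm_stream_si32(dst + 2 * outHeight, _mm_extract_ps(sumDot, 2));
    _mm_stream_si32(dst + 3 * outHeight, _mm_extract_ps(sumDot, 3));
}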
What does WTV stand for in the following OpenCL code?
I can't find much info about it. The code is from OpenCV, for processing on the GPU.
__kernel void resizeAREA(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,
__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,
float ifx, float ify, __global const int * ofs_tab,
__global const int * map_tab, __global const float * alpha_tab)
{
int dx = get_global_id(0);
int dy = get_global_id(1);
if (dx < dst_cols && dy < dst_rows)
{
int dst_index = mad24(dy, dst_step, dst_offset);
__global const int * xmap_tab = map_tab;
__global const int * ymap_tab = (__global const int *)(map_tab + (src_cols << 1));
__global const float * xalpha_tab = alpha_tab;
__global const float * yalpha_tab = (__global const float *)(alpha_tab + (src_cols << 1));
__global const int * xofs_tab = ofs_tab;
__global const int * yofs_tab = (__global const int *)(ofs_tab + dst_cols + 1);
int xk0 = xofs_tab[dx], xk1 = xofs_tab[dx + 1];
int yk0 = yofs_tab[dy], yk1 = yofs_tab[dy + 1];
int sy0 = ymap_tab[yk0], sy1 = ymap_tab[yk1 - 1];
int sx0 = xmap_tab[xk0], sx1 = xmap_tab[xk1 - 1];
WTV sum = (WTV)(0), buf;
int src_index = mad24(sy0, src_step, src_offset);
for (int sy = sy0, yk = yk0; sy <= sy1; ++sy, src_index += src_step, ++yk)
{
WTV beta = (WTV)(yalpha_tab[yk]);
buf = (WTV)(0);
for (int sx = sx0, xk = xk0; sx <= sx1; ++sx, ++xk)
{
WTV alpha = (WTV)(xalpha_tab[xk]);
buf += convertToWTV(loadpix(src + mad24(sx, TSIZE, src_index))) * alpha;
}
sum += buf * beta;
}
storepix(convertToT(sum), dst + mad24(dx, TSIZE, dst_index));
}
}
It is not defined in the source you shared. It appears to be a type, like float. Just guessing: it's defined using "-D WTV=something" while compiling the kernel.
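For what it's worth, OpenCV's OpenCL kernels conventionally use T for the pixel type, WT for a wider "work type" accumulator, and WTV for its vector variant; all of them (along with loadpix, storepix, convertToT, TSIZE, and so on) are injected as macros when the host compiles the kernel. A minimal illustration of the mechanism (made-up values, not OpenCV's actual build line):
#include <CL/cl.h>
void buildResizeKernel(cl_program program, cl_device_id device)
{
    /* Inject the types the kernel source refers to as preprocessor macros. */
    const char* options = "-D WTV=float -D convertToWTV=convert_float "
                          "-D T=uchar -D TSIZE=1";
    clBuildProgram(program, 1, &device, options, NULL, NULL);
}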
I have written a filter for image blurring in C and it's working fine; I am trying to run it on the GPU using CUDA C for faster processing. The program has a few if and else conditions, as can be seen below in the C code version.
The inputs to the function are the input image, the output image, and the number of columns.
void convolve_young1D(double * in, double * out, int datasize) {
int i, j;
/* Compute first 3 output elements */
out[0] = B*in[0];
out[1] = B*in[1] + bf[2]*out[0];
out[2] = B*in[2] + (bf[1]*out[0]+bf[2]*out[1]);
/* Recursive computation of output in forward direction using filter parameters bf and B */
for (i=3; i<datasize; i++) {
out[i] = B*in[i];
for (j=0; j<3; j++) {
out[i] += bf[j]*out[i-(3-j)];
}
}
}
//Calling function below
void convolve_young2D(int rows, int columns, int sigma, double ** ip_padded) {
/** \brief Filter radius */
w = 3*sigma;
/** \brief Filter parameter q */
double q;
if (sigma < 2.5)
q = 3.97156 - 4.14554*sqrt(1-0.26891*sigma);
else
q = 0.98711*sigma - 0.9633;
/** \brief Filter parameters b0, b1, b2, b3 */
double b0 = 1.57825 + 2.44413*q + 1.4281*q*q + 0.422205*q*q*q;
double b1 = 2.44413*q + 2.85619*q*q + 1.26661*q*q*q;
double b2 = -(1.4281*q*q + 1.26661*q*q*q);
double b3 = 0.422205*q*q*q;
/** \brief Filter parameters bf, bb, B */
bf[0] = b3/b0; bf[1] = b2/b0; bf[2] = b1/b0;
bb[0] = b1/b0; bb[1] = b2/b0; bb[2] = b3/b0;
B = 1 - (b1+b2+b3)/b0;
int i,j;
/* Convolve each row with 1D Gaussian filter */
double *out_t = calloc(columns+(2*w),sizeof(double ));
for (i=0; i<rows+2*w; i++) {
convolve_young1D(ip_padded[i], out_t, columns+2*w);
}
free(out_t);
}
I tried the same approach with blocks and threads in CUDA C but wasn't successful: I have been getting zeros as output, and even the input values seem to change to zeros. I don't know where I am going wrong; please do help. I am pretty new to CUDA C programming. Here is my attempted version of the CUDA kernel.
__global__ void convolve_young2D( float *in, float *out,int rows,int columns, int j,float B,float bf[3],int w) {
int k;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if((x>0) && (x<(rows+2*w)))
{
//printf("%d \t",x);
if(j ==0)
{
// Compute first output elements
out[x*columns] = B*in[x*columns];
}
else if(j==1)
{
out[x*columns +1 ] = B*in[x*columns +1] + bf[2]*out[x*columns];
}
else if (j== 2)
{
out[2] = B*in[x*columns +2] + (bf[1]*out[x*columns]+bf[2]*out[x*columns+1]);
}
else{
// Recursive computation of output in forward direction using filter parameters bf and B
out[x*columns+j] = B*in[x*columns+j];
for (k=0; k<3; k++) {
out[x*columns + j] += bf[k]*out[(x*columns+j)-(3-k)];
}
}
}
}
//Calling function below
void convolve_young2D(int rows, int columns, int sigma, const float * const ip_padded, float * const op_padded) {
float bf[3], bb[3];
float B;
int w;
/** \brief Filter radius */
w = 3*sigma;
/** \brief Filter parameter q */
float q;
if (sigma < 2.5)
q = 3.97156 - 4.14554*sqrt(1-0.26891*sigma);
else
q = 0.98711*sigma - 0.9633;
/** \brief Filter parameters b0, b1, b2, b3 */
float b0 = 1.57825 + 2.44413*q + 1.4281*q*q + 0.422205*q*q*q;
float b1 = 2.44413*q + 2.85619*q*q + 1.26661*q*q*q;
float b2 = -(1.4281*q*q + 1.26661*q*q*q);
float b3 = 0.422205*q*q*q;
/** \brief Filter parameters bf, bb, B */
bf[0] = b3/b0; bf[1] = b2/b0; bf[2] = b1/b0;
bb[0] = b1/b0; bb[1] = b2/b0; bb[2] = b3/b0;
B = 1 - (b1+b2+b3)/b0;
int p;
const int inputBytes = (rows+2*w) * (columns+2*w) * sizeof(float);
float *d_input, *d_output; // arrays in the GPU´s global memory
cudaMalloc(&d_input, inputBytes);
cudaMemcpy(d_input, ip_padded, inputBytes, cudaMemcpyHostToDevice);
cudaMalloc(&d_output,inputBytes);
for (p = 0; p<columns+2*w; p++){
convolve_young<<<4,500>>>(d_input,d_output,rows,columns,p,B,bf,w);
}
cudaMemcpy(op_padded, d_input, inputBytes, cudaMemcpyDeviceToHost);
cudaFree(d_input);
}
The first problem is that you call convolve_young<<<4,500>>>(d_input,d_output,rows,columns,p,B,bf,w); but you defined a kernel named convolve_young2D.
Another possible problem is that to do the convolution you do:
for (p = 0; p<columns+2*w; p++){
convolve_young<<<4,500>>>(d_input,d_output,rows,columns,p,B,bf,w);
}
Here you're looping over the columns instead of the rows compared to the CPU algorithm:
for (i=0; i<rows+2*w; i++) {
convolve_young1D(ip_padded[i], out_t, columns+2*w);
}
First you should try to do a direct port of your CPU algorithm, computing one line at a time, and then modify it to transfer the whole image.
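As a concrete starting point, such a direct port might look like the sketch below (untested; one thread per padded row, since the recursion along a row is inherently serial). Two more things worth checking in the posted host code: a kernel parameter declared as float bf[3] decays to a host pointer that the GPU cannot dereference, so the taps are passed by value in a small struct here, and the final cudaMemcpy copies d_input back instead of d_output.
struct Taps { float b0, b1, b2; }; // bf[0], bf[1], bf[2], passed by value
__global__ void convolve_young_rows(const float* in, float* out,
                                    int rows, int columns, float B, Taps bf)
{
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= rows) return;
    const float* x = in + (size_t)r * columns;
    float* y = out + (size_t)r * columns;
    // Same recurrence as convolve_young1D, one full row per thread
    y[0] = B * x[0];
    y[1] = B * x[1] + bf.b2 * y[0];
    y[2] = B * x[2] + bf.b1 * y[0] + bf.b2 * y[1];
    for (int i = 3; i < columns; i++)
        y[i] = B * x[i] + bf.b0 * y[i - 3] + bf.b1 * y[i - 2] + bf.b2 * y[i - 1];
}
// Launch over the padded image, then copy the output buffer back:
//   int paddedRows = rows + 2 * w, paddedCols = columns + 2 * w;
//   convolve_young_rows<<<(paddedRows + 255) / 256, 256>>>(
//       d_input, d_output, paddedRows, paddedCols, B, taps);
//   cudaMemcpy(op_padded, d_output, inputBytes, cudaMemcpyDeviceToHost);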