How can I generate SVE vectors with LLVM - clang

clang version 11.0.0
example.c:
#define ARRAYSIZE 1024
int a[ARRAYSIZE];
int b[ARRAYSIZE];
int c[ARRAYSIZE];
void subtract_arrays(int *restrict a, int *restrict b, int *restrict c)
{
    for (int i = 0; i < ARRAYSIZE; i++)
    {
        a[i] = b[i] - c[i];
    }
}
int main()
{
    subtract_arrays(a, b, c);
}
command:
clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -S example.c
LLVM always generates NEON vectors, but I want it to generate SVE vectors.
How can I do this?

Unfortunately, Clang version 11 does not support SVE auto-vectorization.
That will come with LLVM 13 (see "Architecture support in LLVM").
You can however generate SVE code with intrinsic functions or inline assembly.
Your code with intrinsic functions would look something along the lines of:
#include <arm_sve.h>

#define ARRAYSIZE 1024  /* as in the question */

void subtract_arrays(int *restrict a, int *restrict b, int *restrict c)
{
    int i = 0;
    svbool_t pg = svwhilelt_b32(i, ARRAYSIZE);
    do
    {
        svint32_t db_vec = svld1(pg, &b[i]);            // load the active lanes of b
        svint32_t dc_vec = svld1(pg, &c[i]);            // load the active lanes of c
        svint32_t da_vec = svsub_z(pg, db_vec, dc_vec); // subtract, zeroing inactive lanes
        svst1(pg, &a[i], da_vec);                       // store the active lanes to a
        i += svcntw();                                  // advance by the vector length in 32-bit lanes
        pg = svwhilelt_b32(i, ARRAYSIZE);
    }
    while (svptest_any(svptrue_b32(), pg));
}
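For reference, an intrinsics-only file like the one above builds with the same flags already used in the question, since arm_sve.h requires an SVE-enabled target:

clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -S example.c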
I had a similar problem, thinking that SVE auto-vectorization was supported.
When targeting SVE with Clang, the optimization reports claim successful vectorization even though the compiler only vectorizes for Neon, which is misleading.
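If you want to see what the vectorizer actually did rather than relying on the summary report, Clang's optimization-remark flags can help; a possible invocation (same flags as in the question, remark options as I understand them for Clang 11):

clang --target=aarch64-linux-gnu -march=armv8-a+sve -O3 -Rpass=loop-vectorize -Rpass-missed=loop-vectorize -Rpass-analysis=loop-vectorize -S example.c

The remarks include the chosen vectorization width, which makes it clear that only fixed-width Neon vectors are being used.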

Related

How to sum all 32-bit or 64-bit sub-registers in an SSE XMM, or AVX YMM, and ZMM register?

Say your task results in a subtotal in each floating-point subregister. I'm not seeing an instruction that would sum the subtotals down to one floating-point total. Do I need to store the XMM/YMM register to plain old memory and then do the sum with scalar instructions?
(It's unresolved whether these will be double or single-precision, and I plan on coding for every CPU variation up to the forthcoming (?) 512-bit AVX version if I can find the opcodes.)
wget http://www.agner.org/optimize/vectorclass.zip
unzip vectorclass.zip -d vectorclass
cd vectorclass/
This code is GPLv3.
SSE
grep -A11 horizontal_add vectorf128.h
static inline float horizontal_add (Vec4f const & a) {
#if INSTRSET >= 3 // SSE3
    __m128 t1 = _mm_hadd_ps(a,a);
    __m128 t2 = _mm_hadd_ps(t1,t1);
    return _mm_cvtss_f32(t2);
#else
    __m128 t1 = _mm_movehl_ps(a,a);
    __m128 t2 = _mm_add_ps(a,t1);
    __m128 t3 = _mm_shuffle_ps(t2,t2,1);
    __m128 t4 = _mm_add_ss(t2,t3);
    return _mm_cvtss_f32(t4);
#endif
}
--
static inline double horizontal_add (Vec2d const & a) {
#if INSTRSET >= 3 // SSE3
    __m128d t1 = _mm_hadd_pd(a,a);
    return _mm_cvtsd_f64(t1);
#else
    __m128  t0 = _mm_castpd_ps(a);
    __m128d t1 = _mm_castps_pd(_mm_movehl_ps(t0,t0));
    __m128d t2 = _mm_add_sd(a,t1);
    return _mm_cvtsd_f64(t2);
#endif
}
AVX
grep -A6 horizontal_add vectorf256.h
static inline float horizontal_add (Vec8f const & a) {
    __m256 t1 = _mm256_hadd_ps(a,a);
    __m256 t2 = _mm256_hadd_ps(t1,t1);
    __m128 t3 = _mm256_extractf128_ps(t2,1);
    __m128 t4 = _mm_add_ss(_mm256_castps256_ps128(t2),t3);
    return _mm_cvtss_f32(t4);
}
--
static inline double horizontal_add (Vec4d const & a) {
    __m256d t1 = _mm256_hadd_pd(a,a);
    __m128d t2 = _mm256_extractf128_pd(t1,1);
    __m128d t3 = _mm_add_sd(_mm256_castpd256_pd128(t1),t2);
    return _mm_cvtsd_f64(t3);
}
AVX512
grep -A3 horizontal_add vectorf512.h
static inline float horizontal_add (Vec16f const & a) {
#if defined(__INTEL_COMPILER)
    return _mm512_reduce_add_ps(a);
#else
    return horizontal_add(a.get_low() + a.get_high());
#endif
}
--
static inline double horizontal_add (Vec8d const & a) {
#if defined(__INTEL_COMPILER)
    return _mm512_reduce_add_pd(a);
#else
    return horizontal_add(a.get_low() + a.get_high());
#endif
}
get_high() and get_low()
Vec8f get_high() const {
    return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(zmm),1));
}
Vec8f get_low() const {
    return _mm512_castps512_ps256(zmm);
}
Vec4d get_low() const {
    return _mm512_castpd512_pd256(zmm);
}
Vec4d get_high() const {
    return _mm512_extractf64x4_pd(zmm,1);
}
For integers look for horizontal_add in vectori128.h, vectori256.h, and vectori512.h.
You can also use the Vector Class Library (VCL) directly:
#include <stdio.h>
#define MAX_VECTOR_SIZE 512
#include "vectorclass.h"

int main(void) {
    float x[16]; for(int i=0; i<16; i++) x[i] = i+1;
    Vec4f  v4  = Vec4f().load(x);
    Vec8f  v8  = Vec8f().load(x);
    Vec16f v16 = Vec16f().load(x);
    printf("%f %d\n", horizontal_add(v4),  4*5/2);
    printf("%f %d\n", horizontal_add(v8),  8*9/2);
    printf("%f %d\n", horizontal_add(v16), 16*17/2);
}
Compile like this (GCC only; my KNL is too old for AVX512, so the AVX512ER case uses ICC):
SSE2: g++ -O3 test.cpp
AVX: g++ -O3 -mavx test.cpp
AVX512ER: icpc -O3 -xMIC-AVX512 test.cpp
output
10.000000 10
36.000000 36
136.000000 136
One nice thing about the VCL library is that if you use e.g. Vec8f on a system that only has SSE2, it will emulate AVX by using SSE twice.
See the section "Instruction sets and CPU dispatching" in the vectorclass.pdf manual for how to compile for different instruction sets with MSVC, ICC, Clang, and GCC.
I have implemented the following inline function for AVX2. It sums all elements and returns the result. You can treat it as a suggested answer and develop your own function for this purpose.
Note: _mm256_extract_epi32 is not available for AVX-only targets; there you can use your own method based on vmovss, such as float _mm256_cvtss_f32 (__m256 a), and develop your horizontal addition functions accordingly.
// my horizontal addition of epi32
inline int _mm256_hadd2_epi32(__m256i a)
{
    __m256i a_hi;
    a_hi = _mm256_permute2x128_si256(a, a, 1); //maybe it should be 4
    a = _mm256_hadd_epi32(a, a_hi);
    a = _mm256_hadd_epi32(a, a);
    a = _mm256_hadd_epi32(a, a);
    return _mm256_extract_epi32(a,0);
}
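A minimal usage sketch for the function above (the test values and file name are mine, not from the original answer); on an AVX2-capable machine this should print 36, the sum of 1 through 8:

#include <stdio.h>
#include <immintrin.h>

// paste _mm256_hadd2_epi32 from above here

int main(void)
{
    __m256i v = _mm256_set_epi32(8, 7, 6, 5, 4, 3, 2, 1); // elements 1..8
    printf("%d\n", _mm256_hadd2_epi32(v));                // expected: 36
    return 0;
}

Compile with something like g++ -O3 -mavx2 test.cpp.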

gcc - openacc - Compiled program does not function properly

Recently, there have been some efforts in the GCC community to support OpenACC in their compiler. So, I wanted to try it out.
Using this step-by-step tutorial (tutorial), which is close to the main documentation on the GCC website, I was able to compile and build GCC 6.1 with OpenACC support.
Then, I compiled my program using following command:
gcc pi.c -fopenacc -foffload=nvptx-none -foffload="-O3" -O3
Compilation finishes without any errors, and the program also runs without errors, but it does not produce the correct answer.
Here are my C code and the output of the running program:
#include <stdio.h>
#include <openacc.h>

#define N 20000
#define vl 1024

int main(void) {
    double pi = 0.0f;
    long long i;
    int change = 0;

    printf("Number of devices: %d\n", acc_get_num_devices(acc_device_nvidia));

    #pragma acc parallel
    {
        change = 1;
        #pragma acc loop reduction(+:pi) private(i)
        for (i=0; i<N; i++) {
            double t = (double)((i+0.5)/N);
            pi += 4.0/(1.0+t*t);
        }
    }
    printf("Change: %d\n", change);
    printf("pi=%11.10f\n", pi/N);

    pi = 0.0;
    for (i=0; i<N; i++) {
        double t = (double)((i+0.5)/N);
        pi += 4.0/(1.0+t*t);
    }
    printf("pi=%11.10f\n", pi/N);

    return 0;
}
And this is the output after running a.out:
Number of devices: 1
Change: 0
pi=0.0000000000
pi=3.1415926538
Any ideas?
Try moving "parallel" to the loop instead of the block.
// #pragma acc parallel
{
    change = 1;
    #pragma acc parallel loop reduction(+:pi)
    for (i=0; i<N; i++) {
        double t = (double)((i+0.5)/N);
        pi += 4.0/(1.0+t*t);
    }
}
I just tried this with gcc 6.1 and it worked correctly. Note that there's no need to privatize "i" since scalars are private by default.
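As a side note (my reading of the OpenACC scalar rules, not something the original answer states): the Change: 0 line in the question's output comes from that same default. A scalar assigned inside #pragma acc parallel is implicitly firstprivate, so the host copy is never updated unless you add an explicit data clause. A minimal self-contained illustration:

#include <stdio.h>

int main(void) {
    int change = 0;
    /* Without copy(change), the scalar would be implicitly firstprivate and
       the host copy would stay 0, which is what the question observed. */
    #pragma acc parallel copy(change)
    {
        change = 1;
    }
    printf("Change: %d\n", change);
    return 0;
}

Built with the same gcc -fopenacc -foffload=nvptx-none flags as in the question, this should print Change: 1.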

Compiler commands for accull while using opencv

I'm trying to accelerate an OpenCV program I wrote using OpenACC, and I'm using the accULL compiler to do this. However, I'm having a very hard time finding any documentation or examples that would help me with this.
http://scelementary.com/2015/04/30/openacc-on-jetson-tk1.html
I don't have any experience with accULL, but I can provide you with an example that uses OpenCV and OpenACC, and maybe that'll help you get moving. This has been tested on x86 with PGI on Ubuntu 14.04. It will read an image, invert the pixels, and write the image back out.
invert.cpp:
void invert(unsigned char *imgData, int w, int h, int ch, int step)
{
    int i,j,c;
    #pragma acc parallel loop collapse(3) copy(imgData[:h*w*ch])
    for ( i = 0; i < h; i++)
        for ( j = 0; j < w; j++ )
            for ( c = 0; c < ch; c++ )
                imgData[i*step + j*ch + c] = 255 - imgData[i*step + j*ch + c];
}
main.cpp:
#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/highgui.h>

void invert(unsigned char*,int,int,int,int);

int main(int argc, char* argv[])
{
    if (argc < 3)
    {
        fprintf(stderr,"Usage: %s inFilename outFilename\n",argv[0]);
        return -1;
    }
    IplImage* img = cvLoadImage(argv[1]);
    printf("%s: %d x %d, %d %d\n", argv[1], img->width, img->height, img->widthStep, img->nChannels);
    invert((unsigned char*)img->imageData, img->width, img->height, img->nChannels, img->widthStep);
    if(!cvSaveImage(argv[2],img))
        fprintf(stderr,"Failed to write to %s.\n",argv[2]);
    cvReleaseImage(&img);
    return 0;
}
Makefile:
a.out: main.cpp invert.cpp
	pgc++ -fast -ta=tesla -c invert.cpp
	pgc++ -fast -ta=tesla -c main.cpp
	pgc++ -ta=tesla invert.o main.o -lopencv_legacy -lopencv_highgui -lopencv_core
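Assuming the build succeeds, usage follows the message in main.cpp; the file names here are just placeholders:

make
./a.out input.jpg output.jpg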

Why is calling my C code from F# very slow (compared to native)?

So I wrote some numerical code in C but wanted to call it from F#. However, it runs incredibly slowly.
Times:
gcc -O3 : 4 seconds
gcc -O0 : 30 seconds
F# code which calls the optimised gcc code: 2 minutes 30 seconds
For reference, the C code is:
int main(int argc, char** argv)
{
    setvals(100,100,15,20.0,0.0504);
    float* dmats = malloc(sizeof(float) * factor*factor);
    MakeDmat(1.4,-1.92,dmats); //dmat appears to be correct
    float* arr1 = malloc(sizeof(float)*xsize*ysize);
    float* arr2 = malloc(sizeof(float)*xsize*ysize);
    randinit(arr1);
    for (int i = 0; i < 10000; i++)
    {
        evolve(arr1,arr2,dmats);
        evolve(arr2,arr1,dmats);
        if (i==9999) { print(arr1,xsize,ysize); }
    }
    return 0;
}
I left out the implementation of the functions. The F# code I am using is:
open System.Runtime.InteropServices
open Microsoft.FSharp.NativeInterop
[<DllImport("a.dll")>] extern void main (int argc, char* argv)
[<DllImport("a.dll")>] extern void setvals (int _xsize, int _ysize, int _distlimit,float _tau,float _Iex)
[<DllImport("a.dll")>] extern void MakeDmat(float We,float Wi, float*arr)
[<DllImport("a.dll")>] extern void randinit(float* arr)
[<DllImport("a.dll")>] extern void print(float* arr)
[<DllImport("a.dll")>] extern void evolve (float* input, float* output,float* connections)
let dlimit,xsize,ysize = 15,100,100
let factor = (2*dlimit)+1
setvals(xsize,ysize,dlimit,20.0,0.0504)
let dmat = Array.zeroCreate (factor*factor)
MakeDmat(1.4,-1.92,&&dmat.[0])
let arr1 = Array.zeroCreate (xsize*ysize)
let arr2 = Array.zeroCreate (xsize*ysize)
let addr1 = &&arr1.[0]
let addr2 = &&arr2.[0]
let dmataddr = &&dmat.[0]
randinit(&&dmat.[0])
[0..10000] |> List.iter (fun _ ->
    evolve(addr1,addr2,dmataddr)
    evolve(addr2,addr1,dmataddr)
)
print(&&arr1.[0])
The F# code is compiled with optimisations on.
Is the mono interface for calling C code really that slow (almost 8ms of overhead per function call) or am I just doing something stupid?
It looks like part of the problem is that you are using float on both the F# and C sides of the PInvoke signature. In F#, float is really System.Double and hence is 8 bytes, while a C float is generally 4 bytes; the matching F# type for a C float is float32 (System.Single).
If this were running under the CLR I would expect you to see a PInvoke stack unbalanced error during debugging. I'm not sure if Mono has similar checks or not. But it's possible this is related to the problem you're seeing.
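A quick sanity check on the native side (a trivial sketch of my own, not part of the original answer) is to print the sizes that have to line up across the PInvoke boundary:

#include <stdio.h>

int main(void)
{
    /* F#'s float is System.Double (8 bytes); a C float is typically 4 bytes,
       so the extern declarations above do not match the C prototypes. */
    printf("sizeof(float)=%zu, sizeof(double)=%zu\n",
           sizeof(float), sizeof(double));
    return 0;
}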

How to solve CUDA Thrust library - for_each synchronization error?

I'm trying to modify a simple dynamic vector in CUDA using CUDA's Thrust library, but I'm getting a "launch_closure_by_value" error on the screen indicating that the error is related to some synchronization process.
A simple 1D dynamic array modification is not possible due to this error.
My code segment which is causing the error is as follows.
From a .cpp file I call setIndexedGridInfo, which is defined in System.cu:
float* a= (float*)(malloc(8*sizeof(float)));
a[0]= 0; a[1]= 1; a[2]= 2; a[3]= 3; a[4]= 4; a[5]= 5; a[6]= 6; a[7]= 7;
float* b = (float*)(malloc(8*sizeof(float)));
setIndexedGridInfo(a,b);
The code segment at System.cu:
void
setIndexedGridInfo(float* a, float*b)
{
    thrust::device_ptr<float> d_oldData(a);
    thrust::device_ptr<float> d_newData(b);
    float c = 0.0;
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData,d_newData)),
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData+8,d_newData+8)),
        grid_functor(c));
}
grid_functor is defined in _kernel.cu:
struct grid_functor
{
    float a;
    __host__ __device__
    grid_functor(float grid_Info) : a(grid_Info) {}
    template <typename Tuple>
    __device__
    void operator()(Tuple t)
    {
        volatile float data = thrust::get<0>(t);
        float pos = data + 0.1;
        thrust::get<1>(t) = pos;
    }
};
I also get these in the Output window (I use Visual Studio):
First-chance exception at 0x000007fefdc7cacd in Particles.exe: Microsoft C++ exception: cudaError_enum at memory location 0x0029eb60..
First-chance exception at 0x000007fefdc7cacd in smokeParticles.exe: Microsoft C++ exception: thrust::system::system_error at memory location 0x0029ecf0..
Unhandled exception at 0x000007fefdc7cacd in Particles.exe: Microsoft C++ exception: thrust::system::system_error at memory location 0x0029ecf0..
What is causing the problem?
You are trying to use host memory pointers in functions expecting pointers in device memory. This code is the problem:
float* a= (float*)(malloc(8*sizeof(float)));
a[0]= 0; a[1]= 1; a[2]= 2; a[3]= 3; a[4]= 4; a[5]= 5; a[6]= 6; a[7]= 7;
float* b = (float*)(malloc(8*sizeof(float)));
setIndexedGridInfo(a,b);
.....
thrust::device_ptr<float> d_oldData(a);
thrust::device_ptr<float> d_newData(b);
The thrust::device_ptr is intended for "wrapping" a device memory pointer allocated with the CUDA API so that thrust can use it. You are trying to treat a host pointer directly as a device pointer. That is illegal. You could modify your setIndexedGridInfo function like this:
void setIndexedGridInfo(float* a, float*b, const int n)
{
    thrust::device_vector<float> d_oldData(a,a+n);
    thrust::device_vector<float> d_newData(b,b+n);
    float c = 0.0;
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData.begin(),d_newData.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData.end(),d_newData.end())),
        grid_functor(c));
}
The device_vector constructor will allocate device memory and then copy the contents of your host memory to the device. That should fix the error you are seeing, although I am not sure what you are trying to do with the for_each iterator and whether the functor you have written is correct.
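For completeness, here is a minimal sketch (my own, not part of the original answer) of the other option described above: allocate device memory with the CUDA API first, copy the host data over, and only then wrap the device pointers in thrust::device_ptr. It assumes the same grid_functor and an element count n:

#include <thrust/device_ptr.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

void setIndexedGridInfo(float* a, float* b, const int n)
{
    float *d_a = 0, *d_b = 0;
    cudaMalloc((void**)&d_a, n * sizeof(float));                   // device allocations
    cudaMalloc((void**)&d_b, n * sizeof(float));
    cudaMemcpy(d_a, a, n * sizeof(float), cudaMemcpyHostToDevice); // host -> device

    thrust::device_ptr<float> d_oldData(d_a);                      // wrap *device* pointers
    thrust::device_ptr<float> d_newData(d_b);
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData, d_newData)),
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData + n, d_newData + n)),
        grid_functor(0.0f));

    cudaMemcpy(b, d_b, n * sizeof(float), cudaMemcpyDeviceToHost); // result back to host
    cudaFree(d_a);
    cudaFree(d_b);
}

Error checking on the CUDA calls is omitted to keep the sketch short.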
Edit:
Here is a complete, compilable, runnable version of your code:
#include <cstdlib>
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/copy.h>

struct grid_functor
{
    float a;
    __host__ __device__
    grid_functor(float grid_Info) : a(grid_Info) {}
    template <typename Tuple>
    __device__
    void operator()(Tuple t)
    {
        volatile float data = thrust::get<0>(t);
        float pos = data + 0.1f;
        thrust::get<1>(t) = pos;
    }
};

void setIndexedGridInfo(float* a, float*b, const int n)
{
    thrust::device_vector<float> d_oldData(a,a+n);
    thrust::device_vector<float> d_newData(b,b+n);
    float c = 0.0;
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData.begin(),d_newData.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(d_oldData.end(),d_newData.end())),
        grid_functor(c));
    thrust::copy(d_newData.begin(), d_newData.end(), b);
}

int main(void)
{
    const int n = 8;
    float* a= (float*)(malloc(n*sizeof(float)));
    a[0]= 0; a[1]= 1; a[2]= 2; a[3]= 3; a[4]= 4; a[5]= 5; a[6]= 6; a[7]= 7;
    float* b = (float*)(malloc(n*sizeof(float)));
    setIndexedGridInfo(a,b,n);
    for(int i=0; i<n; i++) {
        fprintf(stdout, "%d (%f,%f)\n", i, a[i], b[i]);
    }
    return 0;
}
I can compile and run this code on an OS 10.6.8 host with CUDA 4.1 like this:
$ nvcc -Xptxas="-v" -arch=sm_12 -g -G thrustforeach.cu
./thrustforeach.cu(18): Warning: Cannot tell what pointer points to, assuming global memory space
./thrustforeach.cu(20): Warning: Cannot tell what pointer points to, assuming global memory space
./thrustforeach.cu(18): Warning: Cannot tell what pointer points to, assuming global memory space
./thrustforeach.cu(20): Warning: Cannot tell what pointer points to, assuming global memory space
ptxas info : Compiling entry function '_ZN6thrust6detail7backend4cuda6detail23launch_closure_by_valueINS2_18for_each_n_closureINS_12zip_iteratorINS_5tupleINS0_15normal_iteratorINS_10device_ptrIfEEEESB_NS_9null_typeESC_SC_SC_SC_SC_SC_SC_EEEEi12grid_functorEEEEvT_' for 'sm_12'
ptxas info : Used 14 registers, 160+0 bytes lmem, 16+16 bytes smem, 4 bytes cmem[1]
ptxas info : Compiling entry function '_ZN6thrust6detail7backend4cuda6detail23launch_closure_by_valueINS2_18for_each_n_closureINS_12zip_iteratorINS_5tupleINS0_15normal_iteratorINS_10device_ptrIfEEEESB_NS_9null_typeESC_SC_SC_SC_SC_SC_SC_EEEEj12grid_functorEEEEvT_' for 'sm_12'
ptxas info : Used 14 registers, 160+0 bytes lmem, 16+16 bytes smem, 4 bytes cmem[1]
$ ./a.out
0 (0.000000,0.100000)
1 (1.000000,1.100000)
2 (2.000000,2.100000)
3 (3.000000,3.100000)
4 (4.000000,4.100000)
5 (5.000000,5.100000)
6 (6.000000,6.100000)
7 (7.000000,7.100000)
