I need to access the real part of a specific element of a cv::Mat that contains std::complex<double> values.
OpenCV provides example code for creating a complex cv::Mat_ here (search the page for the keyword "complex"; the first mention of that word is where the example is).
Here is my attempt:
Mat B = Mat_<std::complex<double> >(3, 3);
cout << B.depth() << ", " << B.channels() << endl;
B.at<double>(0, 0) = 0;
cout << "B(0,0) = " << B.at<double>(0, 0).real(); // Error due to .rea()
The Mat is filled with elements of type std::complex<double>, but you're requesting a double when you write B.at<double>(0, 0); the return type is then double, which doesn't have a .real() method. Instead you need to request the complex type that your Mat actually holds:
cout << "B(0,0) = " << B.at<std::complex<double> >(0, 0).real();
B(0,0) = 0
If you want to set an imaginary part, you'll need to actually write a complex value into the matrix; otherwise only the real part is set:
B.at<double>(0, 0) = 2;
cout << "B(0,0) = " << B.at<std::complex<double> >(0, 0);
B(0,0) = (2,0)
B.at<std::complex<double> >(0, 0) = std::complex<double> (2, 1);
cout << "B(0,0) = " << B.at<std::complex<double> >(0, 0);
B(0,0) = (2,1)
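For completeness, here is a minimal self-contained sketch (my own, not from the OpenCV docs) showing that a complex<double> Mat is stored as a two-channel CV_64F matrix, so the same element can also be viewed as a cv::Vec2d:
#include <opencv2/opencv.hpp>
#include <complex>
#include <iostream>
int main()
{
    cv::Mat B = cv::Mat_<std::complex<double> >(3, 3, std::complex<double>(0, 0));
    std::cout << B.depth() << ", " << B.channels() << std::endl; // prints "6, 2" (CV_64F, 2 channels)
    B.at<std::complex<double> >(0, 0) = std::complex<double>(2, 1);
    // The same element viewed as a 2-channel double vector:
    cv::Vec2d v = B.at<cv::Vec2d>(0, 0);
    std::cout << "real = " << v[0] << ", imag = " << v[1] << std::endl; // real = 2, imag = 1
    return 0;
}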
I want to use clEnqueueReadBufferRect in OpenCL. To do that, I need to define the region that is passed as one of its arguments, but there is an inconsistency between the OpenCL references.
The online reference mentions that:
The (width, height, depth) in bytes of the 2D or 3D rectangle being read or written. For a 2D rectangle copy, the depth value given by region [2] should be 1.
but in the reference book, page 77, it is mentioned that
region defines the (width in bytes, height in rows, depth in slices) of the 2D or 3D rectangle being read or written. For a 2D rectangle copy, the depth value given by region [2] should be 1. The values in region cannot be 0
but unfortunately, neither of those descriptions worked for me: I had to provide region as (width in columns, height in rows, depth in slices); when I defined the width in bytes rather than columns, I got the error CL_INVALID_VALUE. Now which one is correct?
#define WGX 16
#define WGY 16
#include "misc.hpp"
int main(int argc, char** argv)
{
int i;
int n = 1000;
int filterWidth = 3;
int filterRadius = (int) filterWidth/2;
int padding = filterRadius * 2;
double h = 1.0 / n;
int width_x[2];
int height_x[2];
int deviceWidth[2];
int deviceHeight[2];
int deviceDataSize[2];
for (i = 0; i < 2; ++i)
{
set_domain_length(n, n, height_x[i], width_x[i], i);
}
float* x = new float [height_x[0] * width_x[0]];
init_unknown(x, height_x[0], width_x[0], 0);
set_bndryCond(x, width_x[0], h);
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
assert(platforms.size() > 0);
cl::Platform myPlatform = platforms[0];
std::vector<cl::Device> devices;
myPlatform.getDevices(CL_DEVICE_TYPE_GPU, &devices);
assert(devices.size() > 0);
cl::Device myDevice = devices[0];
cl_display_info(myPlatform, myDevice);
cl::Context context(myDevice);
std::ifstream kernelFile("iterative_scheme.cl");
std::string src(std::istreambuf_iterator<char>(kernelFile), (std::istreambuf_iterator<char>()));
cl::Program::Sources sources(1,std::make_pair(src.c_str(),src.length() + 1));
cl::Program program(context, sources);
cl::CommandQueue queue(context, myDevice);
deviceWidth[0] = roundUp(width_x[0], WGX);
deviceHeight[0] = height_x[0];
deviceDataSize[0] = deviceWidth[0] * deviceHeight[0] * sizeof(float);
cl::Buffer buffer_x;
try
{
buffer_x = cl::Buffer(context, CL_MEM_READ_WRITE, deviceDataSize[0]);
} catch (cl::Error& error)
{
std::cout << " ---> Problem in creating buffer(s) " << std::endl;
std::cout << " ---> " << getErrorString(error) << std::endl;
exit(0);
}
cl::size_t<3> buffer_origin;
buffer_origin[0] = 0;
buffer_origin[1] = 0;
buffer_origin[2] = 0;
cl::size_t<3> host_origin;
host_origin[0] = 0;
host_origin[1] = 0;
host_origin[2] = 0;
cl::size_t<3> region;
region[0] = (size_t)(deviceWidth[0] * sizeof(float));
region[1] = (size_t)(height_x[0]);
region[2] = 1;
std::cout << "===> Start writing data to device" << std::endl;
try
{
queue.enqueueWriteBufferRect(buffer_x, CL_TRUE, buffer_origin, host_origin, region,
deviceWidth[0] * sizeof(float), 0, width_x[0] * sizeof(float), 0, x);
} catch (cl::Error& error)
{
std::cout << " ---> Problem in writing data from Host to Device: " << std::endl;
std::cout << " ---> " << getErrorString(error) << std::endl;
exit(0);
}
// Build the program
std::cout << "===> Start building program" << std::endl;
try
{
program.build("-cl-std=CL2.0");
std::cout << " ---> Build Successfully " << std::endl;
} catch(cl::Error& error)
{
std::cout << " ---> Problem in building program " << std::endl;
std::cout << " ---> " << getErrorString(error) << std::endl;
std::cout << " ---> " << program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(myDevice) << std::endl;
exit(0);
}
std::cout << "===> Start reading data from device" << std::endl;
// read result y and residual from the device
buffer_origin[0] = (size_t)(filterRadius * sizeof(float));
buffer_origin[1] = (size_t)filterRadius;
buffer_origin[2] = 0;
host_origin[0] = (size_t)(filterRadius * sizeof(float));
host_origin[1] = (size_t)filterRadius;
host_origin[2] = 0;
// region of x
region[0] = (size_t)((width_x[0] - padding) * sizeof(float));
region[1] = (size_t)(height_x[0] - padding);
region[2] = 1;
try
{
queue.enqueueReadBufferRect(buffer_x, CL_TRUE, buffer_origin, host_origin,
region, deviceWidth[0] * sizeof(float), 0, deviceWidth[0] * sizeof(float), 0, x);
} catch (cl::Error& error)
{
std::cout << " ---> Problem reading buffer in device: " << std::endl;
std::cout << " ---> " << getErrorString(error) << std::endl;
exit(0);
}
delete[] (x);
return 0;
}
The online reference link you provided says:
region
The (width in bytes, height in rows, depth in slices) of the 2D or 3D rectangle being read or written. For a 2D rectangle copy, the depth value given by region[2] should be 1. The values in region cannot be 0.
This is consistent with what you quoted later from the "reference book". That's because your first link points to OpenCL 2.0 while the second points to 1.2.
The inconsistency you mention exists between the online manual of 1.2 and the PDF of 1.2, but the online manual of 2.0 is consistent with the PDF. So I assume it was a bug in the 1.2 online manual which was fixed in 2.0.
when I defined the width in bytes rather than columns
What's a "column", and how is it different from bytes?
The "elements" of a buffer rect copy are always bytes. If you're reading/writing a 1D rect from a buffer, it simply transfers region[0] bytes. The reason the API has "rows" and "slices" is that with 2D/3D regions you can have padding between rows and slices, but you can't have padding between elements in a 1D region.
I found out the reason for the problem. According to the online reference:
CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
so the enqueueWriteBufferRect call should change as follows:
queue.enqueueWriteBufferRect(buffer_x, CL_TRUE, buffer_origin, host_origin, region,
deviceWidth[0] * sizeof(float), 0, deviceWidth[0] * sizeof(float), 0, x);
which means host_row_pitch = deviceWidth[0] * sizeof(float) instead of host_row_pitch = width_x[0] * sizeof(float).
Is there any method for converting DICOM (CT scan) images to point clouds using VTK?
VTK allows reading DICOM files and DICOM series and volume rendering, but is it possible to generate a point cloud from a series of DICOM images?
If it isn't possible in VTK, is there some other library that I can use for this purpose?
Here is a DICOM to point cloud demonstration. DICOM files are pretty variable depending on how the imaging is collected, but this is what we have been using for CT scans for some time. This is the "manual version", i.e. you will need to interact with the terminal to navigate the DICOM directory. It is possible to automate this, but that is highly dependent on your application.
I have PCL 1.8.0 and vtkDICOM installed. (I was able to do a limited implementation of this without vtkDICOM, but its features make the application far more robust at handling diverse DICOM directory structures.)
You will need to point the function in main towards the appropriate directory on your computer (it should be the folder containing the DICOMDIR file). Once you have loaded the DICOM data, the visualizer has keyboard inputs m and n to control the intensity target being visualized (you can easily change the code to filter on any of the parameters: x, y, z, intensity), and you can change the width or step size as needed.
#include <pcl/common/common_headers.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/filters/passthrough.h>
#include <boost/thread/thread.hpp>
#include <vtkSmartPointer.h>
#include <vtkDICOMImageReader.h>
#include "vtkImageData.h"
#include "vtkDICOMDirectory.h"
#include "vtkDICOMItem.h"
#include "vtkStringArray.h"
#include "vtkIntArray.h"
#include "vtkDICOMReader.h"
bool loadDICOM(pcl::PointCloud<pcl::PointXYZI>::Ptr outCloud, std::string fullPathToDicomDir)
{
// load DICOM dir file
vtkSmartPointer<vtkDICOMDirectory> ddir =
vtkSmartPointer<vtkDICOMDirectory>::New();
ddir->SetDirectoryName(fullPathToDicomDir.c_str());
ddir->Update();
//select patient
int n = ddir->GetNumberOfPatients();
int patientSelection = 0;
if (n > 1)
{
std::cout << "Select Patient number, total count: " << n << std::endl;
std::string userInput;
std::getline(std::cin, userInput);
patientSelection = std::stoi(userInput);
}
const vtkDICOMItem& patientItem = ddir->GetPatientRecord(patientSelection);
std::cout << "Patient " << patientSelection << ": " << patientItem.Get(DC::PatientID).AsString() << "\n";
//select study
vtkIntArray* studies = ddir->GetStudiesForPatient(patientSelection);
vtkIdType m = studies->GetMaxId() + 1;
int studySelection = 0;
if (m > 1)
{
std::cout << "Select study, total count: " << m << std::endl;
std::string userInput;
std::getline(std::cin, userInput);
studySelection = std::stoi(userInput);
}
int j = studies->GetValue(studySelection);
const vtkDICOMItem& studyItem = ddir->GetStudyRecord(j);
const vtkDICOMItem& studyPItem = ddir->GetPatientRecordForStudy(j);
cout << " Study " << j << ": \""
<< studyItem.Get(DC::StudyDescription).AsString() << "\" \""
<< studyPItem.Get(DC::PatientName).AsString() << "\" "
<< studyItem.Get(DC::StudyDate).AsString() << "\n";
int k0 = ddir->GetFirstSeriesForStudy(j);
int k1 = ddir->GetLastSeriesForStudy(j);
int seriesSelection;
std::cout << "Select series, range: " << k0 << " to " << k1 << std::endl;
for (int i = k0; i <= k1; i++)
{
const vtkDICOMItem& seriesItem = ddir->GetSeriesRecord(i);
vtkStringArray* a = ddir->GetFileNamesForSeries(i);
cout << " Series " << i << ": \""
<< seriesItem.Get(DC::SeriesDescription).AsString() << "\" "
<< seriesItem.Get(DC::SeriesNumber).AsString() << " "
<< seriesItem.Get(DC::Modality).AsString() << ", Images: "
<< a->GetNumberOfTuples() << "\n";
}
std::string userInput;
std::getline(std::cin, userInput);
seriesSelection = std::stoi(userInput);
const vtkDICOMItem& seriesItem = ddir->GetSeriesRecord(seriesSelection);
cout << " Series " << seriesSelection << ": \""
<< seriesItem.Get(DC::SeriesDescription).AsString() << "\" "
<< seriesItem.Get(DC::SeriesNumber).AsString() << " "
<< seriesItem.Get(DC::Modality).AsString() << "\n";
vtkStringArray* a = ddir->GetFileNamesForSeries(seriesSelection);
vtkDICOMReader* reader = vtkDICOMReader::New();
reader->SetFileNames(a);
reader->Update();
vtkSmartPointer<vtkImageData> sliceData = reader->GetOutput();
int numberOfDims = sliceData->GetDataDimension();
int* dims = sliceData->GetDimensions();
std::cout << "Cloud dimensions: ";
int totalPoints = 1;
for (int i = 0; i < numberOfDims; i++)
{
std::cout << dims[i] << " , ";
totalPoints = totalPoints * dims[i];
}
std::cout << std::endl;
std::cout << "Number of dicom points: " << totalPoints << std::endl;
//read data into grayCloud
double* dataRange = sliceData->GetScalarRange();
double* spacingData = reader->GetDataSpacing();
std::cout << "Data intensity bounds... min: " << dataRange[0] << ", max: " << dataRange[1] << std::endl;
if (numberOfDims != 3)
{
std::cout << "Incorrect number of dimensions in dicom file, generation failed..." << std::endl;
return false;
}
else
{
Eigen::RowVector3f spacing = Eigen::RowVector3f(spacingData[0], spacingData[1], spacingData[2]);
Eigen::RowVector3i dimensions = Eigen::RowVector3i(dims[0], dims[1], dims[2]);
outCloud->points.clear();
std::cout << "x spacing: " << spacing(0) << std::endl;
std::cout << "y spacing: " << spacing(1) << std::endl;
std::cout << "z spacing: " << spacing(2) << std::endl;
for (int z = 0; z < dims[2]; z++)
{
if (z % 50 == 0)
{
double percentageComplete = (double)z / (double)dims[2];
std::cout << "Dicom Read Progress: " << (int)(100.0 * percentageComplete) << "%" << std::endl;
}
for (int y = 0; y < dims[1]; y++)
{
for (int x = 0; x < dims[0]; x++)
{
double tempIntensity = sliceData->GetScalarComponentAsDouble(x, y, z, 0);
int tempX = x;
pcl::PointXYZI tempPt = pcl::PointXYZI();
if (!std::isinf(tempIntensity) && !std::isnan(tempIntensity))
{
//map value into positive realm
//tempIntensity = ((tempIntensity - dataRange[0]) / (dataRange[1] - dataRange[0]));
if (tempIntensity > SHRT_MAX) { tempIntensity = SHRT_MAX; }
else if (tempIntensity < SHRT_MIN) { tempIntensity = SHRT_MIN; }
}
else
{
tempIntensity = 0;
}
tempPt.x = tempX;
tempPt.y = y;
tempPt.z = z;
tempPt.intensity = tempIntensity;
outCloud->points.push_back(tempPt);
}
}
}
}
std::cout << "Load Dicom Cloud Complete!" << std::endl;
return true;
}
int indexSlice = 0;
void keyboardEventOccurred(const pcl::visualization::KeyboardEvent& event, void* viewer)
{
if (event.getKeySym() == "n" && event.keyDown())
{
indexSlice -= 1;
}
else if (event.getKeySym() == "m" && event.keyDown())
{
indexSlice += 1;
}
}
void displayCloud(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud, std::string field, int step, int width, std::string window_name = "default")
{
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(new pcl::visualization::PCLVisualizer(window_name));
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 2, "id");
viewer->registerKeyboardCallback(keyboardEventOccurred, (void*)viewer.get());
pcl::PointCloud<pcl::PointXYZI>::Ptr tempCloud(new pcl::PointCloud<pcl::PointXYZI>);
pcl::PassThrough<pcl::PointXYZI> pass;
pass.setInputCloud(cloud);
pass.setFilterFieldName(field); //could gate this on intensity if u preferred
int lastIndex = indexSlice-1; //proc first cycle
while (!viewer->wasStopped()) {
if (indexSlice != lastIndex)
{
int low = step * indexSlice - width / 2;
int high = step * indexSlice + width / 2;
pass.setFilterLimits(low, high);
pass.filter(*tempCloud);
lastIndex = indexSlice;
std::cout << field<< " range: " <<low<<" , "<<high<< std::endl;
viewer->removeAllPointClouds();
pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZI> point_cloud_color_handler(tempCloud, "intensity");
viewer->addPointCloud< pcl::PointXYZI >(tempCloud, point_cloud_color_handler, "id");
}
viewer->spinOnce(50);
}
viewer->close();
}
// --------------
// -----Main-----
// --------------
int main(int argc, char** argv)
{
pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZI>);
loadDICOM(cloud, "C:/Local Software/voyDICOM/resources/DICOM_Samples/2021APR14 MiniAchors_V0");
displayCloud(cloud,"intensity",100,50);
return 0;
}
Note that in most cases DICOM files are relatively massive in terms of raw dimensions, so I very rarely (never?) have loaded a whole DICOM file into a point cloud (until this code). Generally what I do is handle the data in a dense format (a short array) and then create clouds based on selections from that data. That way you can do certain imaging operations that benefit from a fixed data grid (opening, closing, etc.) before going to the sparse data set (point cloud), where everything becomes profoundly more expensive.
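For example, a hedged sketch of that dense-to-sparse step (the array layout, threshold and function name are my own assumptions, not part of the code above):
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
// Convert only the voxels above a chosen intensity threshold from a dense
// short array (x-fastest layout assumed) into a sparse point cloud.
pcl::PointCloud<pcl::PointXYZI>::Ptr denseToCloud(const short* data,
                                                  int dimX, int dimY, int dimZ,
                                                  short threshold)
{
    pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZI>);
    for (int z = 0; z < dimZ; ++z)
        for (int y = 0; y < dimY; ++y)
            for (int x = 0; x < dimX; ++x)
            {
                short v = data[((size_t)z * dimY + y) * dimX + x];
                if (v > threshold) // keep only the "interesting" voxels
                {
                    pcl::PointXYZI p;
                    p.x = x; p.y = y; p.z = z;
                    p.intensity = v;
                    cloud->points.push_back(p);
                }
            }
    return cloud;
}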
Pretty picture of it working with one of my debug DICOM sets (screenshot omitted).
I think I might have found a way after all. I haven't tried it yet, but in theory it should work.
First, the DICOM images need to be converted into .vtk format using VTK. Once they have been converted to .vtk, they can then be converted into .pcd (point cloud format) using PCL (the Point Cloud Library).
I'm trying to calculate the mean (element by element) of a list of matrices. First, I'm computing the sum element by element; here is the code I'm using:
Mat imageResult = videoData[round(timestampInstImages[indexImg] * 100)];
for (double frame = (timestampInstImages[indexImg] + timeBetweenFields); frame < (timestampInstImages[indexImg] + 1); frame += timeBetweenFields)
{
double roundedTimestamp = round(frame * 100);
if (!videoData[roundedTimestamp].empty())
{
cout << "imageResult " << imageResult.at<int>(10,10) << endl;
cout << "videoData[roundedTimestamp] " << videoData[roundedTimestamp].at<int>(10,10) <<endl;
imageResult += videoData[roundedTimestamp];
cout << "Result : " << imageResult.at<int>(10,10) << endl;
}
}
Here are the first lines of the output I got:
imageResult 912924469
videoData[roundedTimestamp] 929701431
Result : 1842625900 //(912924469 + 929701431) It looks good
imageResult 1842625900
videoData[roundedTimestamp] 963386421
Result : -1493214815 // Not sure how the sum of 963386421 and 1842625900 returns this value???
imageResult -1493214815
videoData[roundedTimestamp] 963518006
Result : -536905769
imageResult -536905769
As you can see above, there is something wrong in the sum. Not sure what it is. Any idea what is happening?
To accumulate several frames into a single 'sum' frame, you need an accumulator with a larger depth; otherwise the element type can no longer hold the running sum and it overflows (or saturates), which is exactly what your negative results show. For example:
Mat acc(height, width, CV_32FC3, Scalar::all(0)); // wide (float) accumulator, starts at zero
cv::accumulate(frame, acc); // acc += frame, element by element
cv::accumulate(frame, acc);
cv::accumulate(frame, acc);
acc /= 3;                   // divide by the number of accumulated frames
Mat mean;
acc.convertTo(mean, CV_8UC3); // convert back to the original 8-bit type
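Adapted to the loop in the question (a hedged sketch: videoData, imageResult, timestampInstImages, indexImg and timeBetweenFields are the question's variables, and the CV_64F accumulator type is my assumption):
// Accumulate into a wide (64-bit float) matrix, then divide by the frame count.
Mat acc = Mat::zeros(imageResult.size(), CV_64FC(imageResult.channels()));
int count = 0;
// start at the first timestamp so the first frame is included as well
for (double frame = timestampInstImages[indexImg];
     frame < (timestampInstImages[indexImg] + 1); frame += timeBetweenFields)
{
    double roundedTimestamp = round(frame * 100);
    if (!videoData[roundedTimestamp].empty())
    {
        Mat asDouble;
        videoData[roundedTimestamp].convertTo(asDouble, CV_64F); // widen before adding
        acc += asDouble;                                         // cannot overflow a 64-bit float here
        ++count;
    }
}
Mat meanImage;
if (count > 0)
    acc.convertTo(meanImage, imageResult.type(), 1.0 / count);   // scale back to the input type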
I am trying to read an RGB image. However, I can only access it as Vec3b values, not each channel separately.
I am not sure what the problem is. Would you help me out of my misery?
imgMod = imread("rgb.png");
for (int iter_x = 0; iter_x < imgMod.cols; ++iter_x)
{
for (int iter_y = 0; iter_y < imgMod.rows; ++iter_y)
{
cout << imgMod.at<cv::Vec3b>(iter_y, iter_x) << "\t";
cout << imgMod.at<cv::Vec3b>(iter_y, iter_x)[0] << "\t";
cout << imgMod.at<cv::Vec3b>(iter_y, iter_x)[1] << "\t";
cout << imgMod.at<cv::Vec3b>(iter_y, iter_x)[2] << endl;
}
}
Here is the result for the pixel values of the RGB image:
[153, 88, 81] X Q
[161, 94, 85] 。 ^ T
...
Your access is fine.
The type returned by the [] operator is unsigned char (uchar), so the value gets printed as a character rather than a number. Just cast it to int to see the channel value as an integer:
cout << int(imgMod.at<cv::Vec3b>(iter_y, iter_x)[0]) << "\t";
A (more readable and explicit) C++ way to do it would be this:
cout << static_cast<int>(imgMod.at<cv::Vec3b>(iter_y, iter_x)[0]) << "\t";
Even more cool is this (obscure?) little trick - note the +:
cout << +imgMod.at<cv::Vec3b>(iter_y, iter_x)[0] << "\t";
// ^
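Putting it together, a minimal hedged sketch of the print loop with the cast applied (note that cv::imread loads the channels in BGR order by default):
#include <opencv2/opencv.hpp>
#include <iostream>
int main()
{
    cv::Mat imgMod = cv::imread("rgb.png"); // 8-bit, 3-channel BGR by default
    if (imgMod.empty()) return 1;
    for (int y = 0; y < imgMod.rows; ++y)
        for (int x = 0; x < imgMod.cols; ++x)
        {
            const cv::Vec3b& px = imgMod.at<cv::Vec3b>(y, x);
            std::cout << +px[0] << "\t"  // blue  (unary + promotes uchar to int)
                      << +px[1] << "\t"  // green
                      << +px[2] << "\n"; // red
        }
    return 0;
}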
Got a question on how to use the EM algorithm in the latest OpenCV 2.4.2.
I used to use the C version and it worked perfectly fine, but since the system upgrade it seems that the C API has been removed in OpenCV 2.4.2.
This is a simple use case for EM: suppose I have a list of radii that are considered to come from two kinds of balls, and I want to estimate the mean/variance for each type. In other words, it is a 1-D problem.
I tried to write the C++ version of the EM code using the new API, but I haven't got it working yet:
int nsamples = radius_list.size();
int ncluster = 2; //we assume a bimodal model
Mat samples = Mat::zeros(nsamples, 1, CV_32FC1);
// init data
for (int i = 0; i < radius_list.size(); ++i) {
int value = radius_list[i];
samples.at<float>(i, 0) = value;
}
EM em_model = EM(ncluster, EM::COV_MAT_SPHERICAL);
if (!em_model.train(samples)) {
cerr << "error training the EM model" << endl;
exit(-1);
}
const Mat& means = em_model.get<Mat>("means");
int mean1 = means.at<float>(0, 0);
int mean2 = means.at<float>(1, 0);
cout << "mean1 = " << mean1 << ", mean2 = " << mean2 << endl;
const vector<Mat>& covs = em_model.get<vector<Mat> >("covs");
int scale1 = covs[0].at<float>(0, 0);
int scale2 = covs[1].at<float>(0, 0);
cout << "scale1 = " << scale1 << ", scale2 = " << scale2 << endl;
The problem is that although the if() didn't complain, the retrieved mean and scale values are junk: -2147483648 on my machine.
Please advise on how to modify the code to make it work. I'm still learning all sorts of C++ APIs in OpenCV.
Thank you all!
You're reading the values back with the wrong types, so the implicit conversions give you garbage. The means, weights and covariance matrices are not ints (or floats) but doubles (you can check by printing the result of Mat::depth()), so change all lines of the form:
int mean1 = means.at<float>(0, 0);
to:
double mean1 = means.at<double>(0, 0);
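Applied to the retrieval block from the question, the corrected reads would look roughly like this (a sketch reusing the question's variable names; all EM outputs, "means", "covs" and "weights", are stored as CV_64F):
const Mat& means = em_model.get<Mat>("means");
double mean1 = means.at<double>(0, 0);
double mean2 = means.at<double>(1, 0);
cout << "mean1 = " << mean1 << ", mean2 = " << mean2 << endl;
const vector<Mat>& covs = em_model.get<vector<Mat> >("covs");
double scale1 = covs[0].at<double>(0, 0);
double scale2 = covs[1].at<double>(0, 0);
cout << "scale1 = " << scale1 << ", scale2 = " << scale2 << endl;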
Regards,
Rafal