Use GeographicLib to generate 3D points
Can GeographicLib be used to convert (WGS84) latitude-longitude pairs to 3D/Cartesian/xyz points?
If not, is there an alternative method for converting from one to the other without using a spherical approximation?
Yes. As the following example shows, the GeographicLib::Geocentric class provides exactly this conversion:
// Example of using the GeographicLib::Geocentric class
#include <iostream>
#include <exception>
#include <cmath>
#include <GeographicLib/Geocentric.hpp>
using namespace std;
using namespace GeographicLib;
int main() {
  try {
    Geocentric earth(Constants::WGS84_a(), Constants::WGS84_f());
    // Alternatively: const Geocentric& earth = Geocentric::WGS84();
    {
      // Sample forward calculation
      double lat = 27.99, lon = 86.93, h = 8820; // Mt Everest
      double X, Y, Z;
      earth.Forward(lat, lon, h, X, Y, Z);
      cout << floor(X / 1000 + 0.5) << " "
           << floor(Y / 1000 + 0.5) << " "
           << floor(Z / 1000 + 0.5) << "\n";
    }
    {
      // Sample reverse calculation
      double X = 302e3, Y = 5636e3, Z = 2980e3;
      double lat, lon, h;
      earth.Reverse(X, Y, Z, lat, lon, h);
      cout << lat << " " << lon << " " << h << "\n";
    }
  }
  catch (const exception& e) {
    cerr << "Caught exception: " << e.what() << "\n";
    return 1;
  }
}
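For reference, the forward calculation prints approximately 302 5636 2980 (the geocentric X, Y, Z in kilometres, rounded). Those rounded values are exactly what the reverse calculation takes as input, so it recovers a latitude, longitude, and height close to the original Mt Everest position.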
Related
I am trying to achieve stable flight with PX4 and ROS 2 offboard control
Hello guys I have a offboard code which give about 50 setpoints to drone. It draws spiral with that setpoints. My problem is I couldnt get smooth travel. In every setpoint drone gives a high roll or pitch instant and then floats to the next setpoint. Is there a way to have stable velocity while passing the setpoints. Here is the code: #include <px4_msgs/msg/offboard_control_mode.hpp> #include <px4_msgs/msg/trajectory_setpoint.hpp> #include <px4_msgs/msg/timesync.hpp> #include <px4_msgs/msg/vehicle_command.hpp> #include <px4_msgs/msg/vehicle_control_mode.hpp> #include <px4_msgs/msg/vehicle_local_position.hpp> #include <rclcpp/rclcpp.hpp> #include <stdint.h> #include <chrono> #include <iostream> #include "std_msgs/msg/string.hpp" #include <math.h> float X; float Y; using namespace std::chrono; using namespace std::chrono_literals; using namespace px4_msgs::msg; class setpoint : public rclcpp::Node { public: setpoint() : Node("setpoint") { offboard_control_mode_publisher_ = this->create_publisher<OffboardControlMode>("fmu/offboard_control_mode/in", 10); trajectory_setpoint_publisher_ = this->create_publisher<TrajectorySetpoint>("fmu/trajectory_setpoint/in", 10); vehicle_command_publisher_ = this->create_publisher<VehicleCommand>("fmu/vehicle_command/in", 10); // get common timestamp timesync_sub_ = this->create_subscription<px4_msgs::msg::Timesync>("fmu/timesync/out", 10, [this](const px4_msgs::msg::Timesync::UniquePtr msg) { timestamp_.store(msg->timestamp); }); offboard_setpoint_counter_ = 0; auto sendCommands = [this]() -> void { if (offboard_setpoint_counter_ == 10) { // Change to Offboard mode after 10 setpoints this->publish_vehicle_command(VehicleCommand::VEHICLE_CMD_DO_SET_MODE, 1, 6); // Arm the vehicle this->arm(); } //------------- subscription_ = this->create_subscription<px4_msgs::msg::VehicleLocalPosition>( "/fmu/vehicle_local_position/out", #ifdef ROS_DEFAULT_API 10, #endif [this](const px4_msgs::msg::VehicleLocalPosition::UniquePtr msg) { X = msg->x; Y = msg->y; std::cout << "\n\n\n\n\n\n\n\n\n\n"; std::cout << "RECEIVED VEHICLE GPS POSITION DATA" << std::endl; std::cout << "==================================" << std::endl; std::cout << "ts: " << msg->timestamp << std::endl; //std::cout << "lat: " << msg->x << std::endl; //std::cout << "lon: " << msg->y << std::endl; std::cout << "lat: " << X << std::endl; std::cout << "lon: " << Y << std::endl; std::cout << "waypoint: " << waypoints[waypointIndex][0] << std::endl; std::cout << "waypoint: " << waypoints[waypointIndex][1] << std::endl; if((waypoints[waypointIndex][0] + 0.3 > X && waypoints[waypointIndex][0] - 0.3 < X)&&(waypoints[waypointIndex][1] + 0.3 > Y && waypoints[waypointIndex][1] - 0.3 < Y)){ waypointIndex++; if (waypointIndex >= waypoints.size()) exit(0); //waypointIndex = 0; RCLCPP_INFO(this->get_logger(), "Next waypoint: %.2f %.2f %.2f", waypoints[waypointIndex][0], waypoints[waypointIndex][1], waypoints[waypointIndex][2]); } }); //-------------- // offboard_control_mode needs to be paired with trajectory_setpoint publish_offboard_control_mode(); publish_trajectory_setpoint(); // stop the counter after reaching 11 if (offboard_setpoint_counter_ < 11) { offboard_setpoint_counter_++; } }; /* auto nextWaypoint = [this]() -> void { waypointIndex++; if (waypointIndex >= waypoints.size()) waypointIndex = 0; RCLCPP_INFO(this->get_logger(), "Next waypoint: %.2f %.2f %.2f", waypoints[waypointIndex][0], waypoints[waypointIndex][1], waypoints[waypointIndex][2]); }; */ commandTimer = this->create_wall_timer(100ms, sendCommands); 
//waypointTimer = this->create_wall_timer(2s, nextWaypoint); //EA } void arm() const; void disarm() const; void topic_callback() const; private: std::vector<std::vector<float>> waypoints = {{0,0,-5,}, {2,0,-5,}, {2.35216,0.476806,-5,}, {2.57897,1.09037,-5,}, {2.64107,1.80686,-5,}, {2.50814,2.58248,-5,}, {2.16121,3.36588,-5,}, {1.59437,4.10097,-5,}, {0.815842,4.73016,-5,}, {-0.151838,5.19778,-5,}, {-1.27233,5.45355,-5,}, {-2.49688,5.45578,-5,}, {-3.76641,5.17438,-5,}, {-5.01428,4.59315,-5,}, {-6.1696,3.71161,-5,}, {-7.16089,2.54591,-5,}, {-7.91994,1.12896,-5,}, {-8.38568,-0.490343,-5,}, {-8.50782,-2.24876,-5,}, {-8.25018,-4.07119,-5,}, {-7.59329,-5.87384,-5,}, {-6.53644,-7.56803,-5,}, {-5.09871,-9.06439,-5,}, {-3.31919,-10.2773,-5,}, {-1.25611,-11.1293,-5,}, {1.01499,-11.5555,-5,}, {3.40395,-11.5071,-5,}, {5.8096,-10.9548,-5,}, {8.12407,-9.89139,-5,}, {10.2375,-8.33272,-5,}, {12.0431,-6.31859,-5,}, {13.4424,-3.91182,-5,}, {14.3502,-1.19649,-5,}, {14.6991,1.72493,-5,}, {14.4435,4.73543,-5,}, {13.5626,7.70817,-5,}, {12.0624,10.5118,-5,}, {9.97696,13.0162,-5,}, {7.36759,15.0983,-5,}, {4.32167,16.6482,-5,}, {0.949612,17.5744,-5,}, {-2.619,17.8084,-5,}, {-6.24045,17.3094,-5,}, {-9.76262,16.0665,-5,}, {-13.0314,14.1004,-5,}, {-15.8974,11.4644,-5,}, {-18.2226,8.24237,-5,}, {-19.8868,4.54696,-5,}, {-20.7936,0.515337,-5,}, {-20.8754,-3.69574,-5,}, {-20.0972,-7.91595,-5,}, {-20.8754,-3.69574,-5,}, {-20.7936,0.515337,-5,}, {-19.8868,4.54696,-5,}, {-18.2226,8.24237,-5,}, {-15.8974,11.4644,-5,}, {-13.0314,14.1004,-5,}, {-9.76262,16.0665,-5,}, {-6.24045,17.3094,-5,}, {-2.619,17.8084,-5,}, {0.949612,17.5744,-5,}, {4.32167,16.6482,-5,}, {7.36759,15.0983,-5,}, {9.97696,13.0162,-5,}, {12.0624,10.5118,-5,}, {13.5626,7.70817,-5,}, {14.4435,4.73543,-5,}, {14.6991,1.72493,-5,}, {14.3502,-1.19649,-5,}, {13.4424,-3.91182,-5,}, {12.0431,-6.31859,-5,}, {10.2375,-8.33272,-5,}, {8.12407,-9.89139,-5,}, {5.8096,-10.9548,-5,}, {3.40395,-11.5071,-5,}, {1.01499,-11.5555,-5,}, {-1.25611,-11.1293,-5,}, {-3.31919,-10.2773,-5,}, {-5.09871,-9.06439,-5,}, {-6.53644,-7.56803,-5,}, {-7.59329,-5.87384,-5,}, {-8.25018,-4.07119,-5,}, {-8.50782,-2.24876,-5,}, {-8.38568,-0.490343,-5,}, {-7.91994,1.12896,-5,}, {-7.16089,2.54591,-5,}, {-6.1696,3.71161,-5,}, {-5.01428,4.59315,-5,}, {-3.76641,5.17438,-5,}, {-2.49688,5.45578,-5,}, {-1.27233,5.45355,-5,}, {-0.151838,5.19778,-5,}, {0.815842,4.73016,-5,}, {1.59437,4.10097,-5,}, {2.16121,3.36588,-5,}, {2.50814,2.58248,-5,}, {2.64107,1.80686,-5,}, {2.57897,1.09037,-5,}, {2.35216,0.476806,-5,}, {2,0,-5,}, {0,0,-5,}, {0,0,0,} }; // Land int waypointIndex = 0; rclcpp::TimerBase::SharedPtr commandTimer; rclcpp::TimerBase::SharedPtr waypointTimer; rclcpp::Publisher<OffboardControlMode>::SharedPtr offboard_control_mode_publisher_; rclcpp::Publisher<TrajectorySetpoint>::SharedPtr trajectory_setpoint_publisher_; rclcpp::Publisher<VehicleCommand>::SharedPtr vehicle_command_publisher_; rclcpp::Subscription<px4_msgs::msg::Timesync>::SharedPtr timesync_sub_; // rclcpp::Subscription<px4_msgs::msg::VehicleLocalPosition>::SharedPtr subscription_; // std::atomic<uint64_t> timestamp_; //!< common synced timestamped uint64_t offboard_setpoint_counter_; //!< counter for the number of setpoints sent void publish_offboard_control_mode() const; void publish_trajectory_setpoint() const; void publish_vehicle_command(uint16_t command, float param1 = 0.0, float param2 = 0.0) const; }; void setpoint::arm() const { publish_vehicle_command(VehicleCommand::VEHICLE_CMD_COMPONENT_ARM_DISARM, 1.0); 
RCLCPP_INFO(this->get_logger(), "Arm command send"); } void setpoint::disarm() const { publish_vehicle_command(VehicleCommand::VEHICLE_CMD_COMPONENT_ARM_DISARM, 0.0); RCLCPP_INFO(this->get_logger(), "Disarm command send"); } void setpoint::publish_offboard_control_mode() const { OffboardControlMode msg{}; msg.timestamp = timestamp_.load(); msg.position = true; msg.velocity = false; msg.acceleration = false; msg.attitude = false; msg.body_rate = false; offboard_control_mode_publisher_->publish(msg); } void setpoint::publish_trajectory_setpoint() const { TrajectorySetpoint msg{}; msg.timestamp = timestamp_.load(); msg.position = {waypoints[waypointIndex][0],waypoints[waypointIndex][1],waypoints[waypointIndex][2]}; msg.yaw = std::nanf("0"); //-3.14; // [-PI:PI] trajectory_setpoint_publisher_->publish(msg); } void setpoint::publish_vehicle_command(uint16_t command, float param1, float param2) const { VehicleCommand msg{}; msg.timestamp = timestamp_.load(); msg.param1 = param1; msg.param2 = param2; msg.command = command; msg.target_system = 1; msg.target_component = 1; msg.source_system = 1; msg.source_component = 1; msg.from_external = true; vehicle_command_publisher_->publish(msg); } int main(int argc, char* argv[]) { std::cout << "Starting setpoint node..." << std::endl; setvbuf(stdout, NULL, _IONBF, BUFSIZ); rclcpp::init(argc, argv); rclcpp::spin(std::make_shared<setpoint>()); rclcpp::shutdown(); return 0; }
We send setpoints to the controller as reference points; the aircraft then tries to maneuver to each point via its control strategy (usually PID). Therefore, to get a smooth maneuver, it is usually suggested to give a series of discrete points between two waypoints, i.e., a trajectory parameterized by time. That should resolve the abrupt motion of your UAV. I'm no expert, but I hope this helps.
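As a minimal, hedged sketch of that idea (this is not the original poster's code; the helper name, the segment duration, and the 10 Hz rate are illustrative assumptions), consecutive waypoints can be linearly interpolated so that a fresh position setpoint is produced every control cycle instead of jumping straight to the next waypoint:
// Sketch: linear interpolation between consecutive waypoints at a fixed rate.
#include <array>
#include <cstdio>
#include <vector>

using Waypoint = std::array<float, 3>;  // x, y, z (NED, metres)

// Returns the setpoint at fraction t (0..1) along the segment from a to b.
Waypoint interpolateSetpoint(const Waypoint& a, const Waypoint& b, float t) {
  Waypoint p;
  for (int i = 0; i < 3; ++i)
    p[i] = a[i] + t * (b[i] - a[i]);
  return p;
}

int main() {
  std::vector<Waypoint> waypoints = {{0, 0, -5}, {2, 0, -5}, {2.35f, 0.48f, -5}};
  const float segmentDuration = 2.0f;  // seconds to travel one segment (assumed)
  const float publishPeriod   = 0.1f;  // 10 Hz stream, matching the 100 ms timer

  for (std::size_t i = 0; i + 1 < waypoints.size(); ++i) {
    for (float t = 0.0f; t < 1.0f; t += publishPeriod / segmentDuration) {
      Waypoint sp = interpolateSetpoint(waypoints[i], waypoints[i + 1], t);
      // In the real node this value would fill TrajectorySetpoint::position
      // in publish_trajectory_setpoint() instead of the raw waypoint.
      std::printf("%.2f %.2f %.2f\n", sp[0], sp[1], sp[2]);
    }
  }
}
Smoother profiles (velocity feed-forward, splines) are possible, but even this simple time-parameterized stream avoids the large step changes between waypoints.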
How to track the 2d points over the images for 3d-reconstruction as specified in opencv sfm pipeline?
I am trying to do 3D reconstruction using this code from opencv. As far as I understood, I need a textfile with the 2D points as per the given format. I am wondering if anyone could help me in getting these 2D points they mention in this program. I have a set of images with the calibration parameters but I have been not able to understand how could I track and save these 2D points e.g. the first point in frame 1 is the same point in frame 2 in the format they specified . #include <opencv2/core.hpp> #include <opencv2/sfm.hpp> #include <opencv2/viz.hpp> #include <iostream> #include <fstream> #include <string> using namespace std; using namespace cv; using namespace cv::sfm; static void help() { cout << "\n------------------------------------------------------------\n" << " This program shows the camera trajectory reconstruction capabilities\n" << " in the OpenCV Structure From Motion (SFM) module.\n" << " \n" << " Usage:\n" << " example_sfm_trajectory_reconstruction <path_to_tracks_file> <f> <cx> <cy>\n" << " where: is the tracks file absolute path into your system. \n" << " \n" << " The file must have the following format: \n" << " row1 : x1 y1 x2 y2 ... x36 y36 for track 1\n" << " row2 : x1 y1 x2 y2 ... x36 y36 for track 2\n" << " etc\n" << " \n" << " i.e. a row gives the 2D measured position of a point as it is tracked\n" << " through frames 1 to 36. If there is no match found in a view then x\n" << " and y are -1.\n" << " \n" << " Each row corresponds to a different point.\n" << " \n" << " f is the focal lenght in pixels. \n" << " cx is the image principal point x coordinates in pixels. \n" << " cy is the image principal point y coordinates in pixels. \n" << "------------------------------------------------------------------\n\n" << endl; } /* Build the following structure data * * frame1 frame2 frameN * track1 | (x11,y11) | -> | (x12,y12) | -> | (x1N,y1N) | * track2 | (x21,y11) | -> | (x22,y22) | -> | (x2N,y2N) | * trackN | (xN1,yN1) | -> | (xN2,yN2) | -> | (xNN,yNN) | * * * In case a marker (x,y) does not appear in a frame its * values will be (-1,-1). 
*/ void parser_2D_tracks(const string &_filename, std::vector<Mat> &points2d ) { ifstream myfile(_filename.c_str()); if (!myfile.is_open()) { cout << "Unable to read file: " << _filename << endl; exit(0); } else { double x, y; string line_str; int n_frames = 0, n_tracks = 0; // extract data from text file vector<vector<Vec2d> > tracks; for ( ; getline(myfile,line_str); ++n_tracks) { istringstream line(line_str); vector<Vec2d> track; for ( n_frames = 0; line >> x >> y; ++n_frames) { if ( x > 0 && y > 0) track.push_back(Vec2d(x,y)); else track.push_back(Vec2d(-1)); } tracks.push_back(track); } // embed data in reconstruction api format for (int i = 0; i < n_frames; ++i) { Mat_<double> frame(2, n_tracks); for (int j = 0; j < n_tracks; ++j) { frame(0,j) = tracks[j][i][0]; frame(1,j) = tracks[j][i][1]; } points2d.push_back(Mat(frame)); } myfile.close(); } } /* Keyboard callback to control 3D visualization */ bool camera_pov = false; void keyboard_callback(const viz::KeyboardEvent &event, void* cookie) { if ( event.action == 0 &&!event.symbol.compare("s") ) camera_pov = !camera_pov; } /* Sample main code */ int main(int argc, char** argv) { // Read input parameters if ( argc != 5 ) { help(); exit(0); } // Read 2D points from text file std::vector<Mat> points2d; parser_2D_tracks( argv[1], points2d ); // Set the camera calibration matrix const double f = atof(argv[2]), cx = atof(argv[3]), cy = atof(argv[4]); Matx33d K = Matx33d( f, 0, cx, 0, f, cy, 0, 0, 1); bool is_projective = true; vector<Mat> Rs_est, ts_est, points3d_estimated; reconstruct(points2d, Rs_est, ts_est, K, points3d_estimated, is_projective); // Print output cout << "\n----------------------------\n" << endl; cout << "Reconstruction: " << endl; cout << "============================" << endl; cout << "Estimated 3D points: " << points3d_estimated.size() << endl; cout << "Estimated cameras: " << Rs_est.size() << endl; cout << "Refined intrinsics: " << endl << K << endl << endl; cout << "3D Visualization: " << endl; cout << "============================" << endl; viz::Viz3d window_est("Estimation Coordinate Frame"); window_est.setBackgroundColor(); // black by default window_est.registerKeyboardCallback(&keyboard_callback); // Create the pointcloud cout << "Recovering points ... "; // recover estimated points3d vector<Vec3f> point_cloud_est; for (int i = 0; i < points3d_estimated.size(); ++i) point_cloud_est.push_back(Vec3f(points3d_estimated[i])); cout << "[DONE]" << endl; cout << "Recovering cameras ... "; vector<Affine3d> path_est; for (size_t i = 0; i < Rs_est.size(); ++i) path_est.push_back(Affine3d(Rs_est[i],ts_est[i])); cout << "[DONE]" << endl; cout << "Rendering Trajectory ... 
"; cout << endl << "Press: " << endl; cout << " 's' to switch the camera pov" << endl; cout << " 'q' to close the windows " << endl; if ( path_est.size() > 0 ) { // animated trajectory int idx = 0, forw = -1, n = static_cast<int>(path_est.size()); while(!window_est.wasStopped()) { for (size_t i = 0; i < point_cloud_est.size(); ++i) { Vec3d point = point_cloud_est[i]; Affine3d point_pose(Mat::eye(3,3,CV_64F), point); char buffer[50]; sprintf (buffer, "%d", static_cast<int>(i)); viz::WCube cube_widget(Point3f(0.1,0.1,0.0), Point3f(0.0,0.0,-0.1), true, viz::Color::blue()); cube_widget.setRenderingProperty(viz::LINE_WIDTH, 2.0); window_est.showWidget("Cube"+string(buffer), cube_widget, point_pose); } Affine3d cam_pose = path_est[idx]; viz::WCameraPosition cpw(0.25); // Coordinate axes viz::WCameraPosition cpw_frustum(K, 0.3, viz::Color::yellow()); // Camera frustum if ( camera_pov ) window_est.setViewerPose(cam_pose); else { // render complete trajectory window_est.showWidget("cameras_frames_and_lines_est", viz::WTrajectory(path_est, viz::WTrajectory::PATH, 1.0, viz::Color::green())); window_est.showWidget("CPW", cpw, cam_pose); window_est.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose); } // update trajectory index (spring effect) forw *= (idx==n || idx==0) ? -1: 1; idx += forw; // frame rate 1s window_est.spinOnce(1, true); window_est.removeAllWidgets(); } } return 0; } I would be really grateful if someone can help me through this. Thank you.
How to refresh the displayed window of imshow?
Every time I click on the displayed window, I want a circle to be drawn. The following code does not refresh the window. How do I do that?
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

const string filename = "family.jpg";
const string sourceWindow = "source";

void onMouse(int event, int x, int y, int flags, void* param) {
  Mat* image = reinterpret_cast<Mat*>(param);
  switch (event) {
  case cv::EVENT_LBUTTONDOWN:
    cout << "at (" << x << "," << y << ") value is: "
         << static_cast<int>(image->at<uchar>(Point(x, y))) << endl;
    circle(*image, Point(x, y), 65, 0, 5);
    break;
  }
}

int main() {
  Mat src = imread(filename, IMREAD_GRAYSCALE);
  if (!src.empty()) {
    namedWindow(sourceWindow, WINDOW_NORMAL);
    imshow(sourceWindow, src);
    setMouseCallback(sourceWindow, onMouse, reinterpret_cast<void*>(&src));
  }
  waitKey(0);
}
Just as is done for video capture (see VideoCapture), you simply call imshow again with the new image:
void onMouse(int event, int x, int y, int flags, void* param) {
  Mat* image = reinterpret_cast<Mat*>(param);
  switch (event) {
  case cv::EVENT_LBUTTONDOWN:
    cout << "at (" << x << "," << y << ") value is: "
         << static_cast<int>(image->at<uchar>(Point(x, y))) << endl;
    circle(*image, Point(x, y), 65, 0, 5);
    imshow(sourceWindow, *image);  // note: dereference the Mat pointer here
    break;
  }
}
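If redrawing from inside the callback proves unreliable on some platforms, a hedged alternative is to redraw from the main loop instead, polling with a short waitKey (the 'q' quit key and the 30 ms period here are assumptions, not part of the original answer):
// Replace the final waitKey(0) with a polling loop; onMouse still modifies src in place.
while (waitKey(30) != 'q') {
  imshow(sourceWindow, src);
}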
DICOM to Point Cloud in VTK
Is there any method for converting DICOM (ct scan) images to Point Clouds using VTK? VTK allows reading DICOM and DICOM series and volume rendering but is it possible to generate a Point Cloud from a series of DICOM images? If it isn't possible in VTK, is there some other library that I can use for this purpose?
Here is a dicom to point cloud demonstration. Dicom files are pretty variable depending on how the imaging is collected, but this is what we have been using for CT scans for some time. This is the "manual version" ie where you will need to interact with the terminal to navigate the dicom directory. It is possible to automate this but is highly dependent on your application. I have pcl 8.0 and vtkdicom installed. (i was able to do a limited implementation of this without vtkdicom, but its features make the application far more robust at handling diverse dicom directory structures). You will need to point the function in the main towards the appropriate directory on your computer (should be the file containing the DICOMDIR file). Once you have loaded the dicom, the visualizer has keyboard inputs m and n to control intensity target to be visualized. (you can easily change the code to filter for any of the parameters: x,y,z,intensity) and can change the width or stepsize as needed. #include <pcl/common/common_headers.h> #include <pcl/visualization/pcl_visualizer.h> #include <pcl/filters/passthrough.h> #include <boost/thread/thread.hpp> #include <vtkSmartPointer.h> #include <vtkDICOMImageReader.h> #include "vtkImageData.h" #include "vtkDICOMDirectory.h" #include "vtkDICOMItem.h" #include "vtkStringArray.h" #include "vtkIntArray.h" #include "vtkDICOMReader.h" bool loadDICOM(pcl::PointCloud<pcl::PointXYZI>::Ptr outCloud, std::string fullPathToDicomDir) { // load DICOM dir file vtkSmartPointer<vtkDICOMDirectory> ddir = vtkSmartPointer<vtkDICOMDirectory>::New(); ddir->SetDirectoryName(fullPathToDicomDir.c_str()); ddir->Update(); //select patient int n = ddir->GetNumberOfPatients(); int patientSelection = 0; if (n > 1) { std::cout << "Select Patient number, total count: " << n << std::endl; std::string userInput; std::getline(std::cin, userInput); patientSelection = std::stoi(userInput); } const vtkDICOMItem& patientItem = ddir->GetPatientRecord(patientSelection); std::cout << "Patient " << patientSelection << ": " << patientItem.Get(DC::PatientID).AsString() << "\n"; //select study vtkIntArray* studies = ddir->GetStudiesForPatient(patientSelection); vtkIdType m = studies->GetMaxId() + 1; int studySelection = 0; if (m > 1) { std::cout << "Select study, total count: " << m << std::endl; std::string userInput; std::getline(std::cin, userInput); studySelection = std::stoi(userInput); } int j = studies->GetValue(studySelection); const vtkDICOMItem& studyItem = ddir->GetStudyRecord(j); const vtkDICOMItem& studyPItem = ddir->GetPatientRecordForStudy(j); cout << " Study " << j << ": \"" << studyItem.Get(DC::StudyDescription).AsString() << "\" \"" << studyPItem.Get(DC::PatientName).AsString() << "\" " << studyItem.Get(DC::StudyDate).AsString() << "\n"; int k0 = ddir->GetFirstSeriesForStudy(j); int k1 = ddir->GetLastSeriesForStudy(j); int seriesSelection; std::cout << "Select series, range: " << k0 << " to " << k1 << std::endl; for (int i = k0; i <= k1; i++) { const vtkDICOMItem& seriesItem = ddir->GetSeriesRecord(i); vtkStringArray* a = ddir->GetFileNamesForSeries(i); cout << " Series " << i << ": \"" << seriesItem.Get(DC::SeriesDescription).AsString() << "\" " << seriesItem.Get(DC::SeriesNumber).AsString() << " " << seriesItem.Get(DC::Modality).AsString() << ", Images: " << a->GetNumberOfTuples() << "\n"; } std::string userInput; std::getline(std::cin, userInput); seriesSelection = std::stoi(userInput); const vtkDICOMItem& seriesItem = ddir->GetSeriesRecord(seriesSelection); cout << " Series " << 
seriesSelection << ": \"" << seriesItem.Get(DC::SeriesDescription).AsString() << "\" " << seriesItem.Get(DC::SeriesNumber).AsString() << " " << seriesItem.Get(DC::Modality).AsString() << "\n"; vtkStringArray* a = ddir->GetFileNamesForSeries(seriesSelection); vtkDICOMReader* reader = vtkDICOMReader::New(); reader->SetFileNames(a); reader->Update(); vtkSmartPointer<vtkImageData> sliceData = reader->GetOutput(); int numberOfDims = sliceData->GetDataDimension(); int* dims = sliceData->GetDimensions(); std::cout << "Cloud dimensions: "; int totalPoints = 1; for (int i = 0; i < numberOfDims; i++) { std::cout << dims[i] << " , "; totalPoints = totalPoints * dims[i]; } std::cout << std::endl; std::cout << "Number of dicom points: " << totalPoints << std::endl; //read data into grayCloud double* dataRange = sliceData->GetScalarRange(); double* spacingData = reader->GetDataSpacing(); std::cout << "Data intensity bounds... min: " << dataRange[0] << ", max: " << dataRange[1] << std::endl; if (numberOfDims != 3) { std::cout << "Incorrect number of dimensions in dicom file, generation failed..." << std::endl; return false; } else { Eigen::RowVector3f spacing = Eigen::RowVector3f(spacingData[0], spacingData[1], spacingData[2]); Eigen::RowVector3i dimensions = Eigen::RowVector3i(dims[0], dims[1], dims[2]); outCloud->points.clear(); std::cout << "x spacing: " << spacing(0) << std::endl; std::cout << "y spacing: " << spacing(1) << std::endl; std::cout << "z spacing: " << spacing(2) << std::endl; for (int z = 0; z < dims[2]; z++) { if (z % 50 == 0) { double percentageComplete = (double)z / (double)dims[2]; std::cout << "Dicom Read Progress: " << (int)(100.0 * percentageComplete) << "%" << std::endl; } for (int y = 0; y < dims[1]; y++) { for (int x = 0; x < dims[0]; x++) { double tempIntensity = sliceData->GetScalarComponentAsDouble(x, y, z, 0); int tempX = x; pcl::PointXYZI tempPt = pcl::PointXYZI(); if (!isinf(tempIntensity) && !isnan(tempIntensity)) { //map value into positive realm //tempIntensity = ((tempIntensity - dataRange[0]) / (dataRange[1] - dataRange[0])); if (tempIntensity > SHRT_MAX) { tempIntensity = SHRT_MAX; } else if (tempIntensity < SHRT_MIN) { tempIntensity = SHRT_MIN; } } else { tempIntensity = 0; } tempPt.x = tempX; tempPt.y = y; tempPt.z = z; tempPt.intensity = tempIntensity; outCloud->points.push_back(tempPt); } } } } std::cout << "Load Dicom Cloud Complete!" 
<< std::endl; return true; } int indexSlice = 0; void keyboardEventOccurred(const pcl::visualization::KeyboardEvent& event, void* viewer) { if (event.getKeySym() == "n" && event.keyDown()) { indexSlice -= 1; } else if (event.getKeySym() == "m" && event.keyDown()) { indexSlice += 1; } } void displayCloud(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud, std::string field, int step, int width, std::string window_name = "default") { boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(new pcl::visualization::PCLVisualizer(window_name)); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 2, "id"); viewer->registerKeyboardCallback(keyboardEventOccurred, (void*)viewer.get()); pcl::PointCloud<pcl::PointXYZI>::Ptr tempCloud(new pcl::PointCloud<pcl::PointXYZI>); pcl::PassThrough<pcl::PointXYZI> pass; pass.setInputCloud(cloud); pass.setFilterFieldName(field); //could gate this on intensity if u preferred int lastIndex = indexSlice-1; //proc first cycle while (!viewer->wasStopped()) { if (indexSlice != lastIndex) { int low = step * indexSlice - width / 2; int high = step * indexSlice + width / 2; pass.setFilterLimits(low, high); pass.filter(*tempCloud); lastIndex = indexSlice; std::cout << field<< " range: " <<low<<" , "<<high<< std::endl; viewer->removeAllPointClouds(); pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZI> point_cloud_color_handler(tempCloud, "intensity"); viewer->addPointCloud< pcl::PointXYZI >(tempCloud, point_cloud_color_handler, "id"); } viewer->spinOnce(50); } viewer->close(); } // -------------- // -----Main----- // -------------- int main(int argc, char** argv) { pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZI>); loadDICOM(cloud, "C:/Local Software/voyDICOM/resources/DICOM_Samples/2021APR14 MiniAchors_V0"); displayCloud(cloud,"intensity",100,50); return 0; } Note that in most cases dicom files are relatively massive in terms of raw dimensions and so I very rarely (never?) have loaded a whole dicom file into a point cloud (until for this code). Generally what I do is handle it in a dense format (short array) and then create clouds based on selections from that data. This way you can do certain imaging operations that benefit from a locked data grid (opening, closing, etc) prior to going to the sparse data set (point cloud) where everything becomes profoundly more expensive. Pretty picture of it working with one of my debug dicom sets:
I think I might have found a way after all. I haven't tried it yet, but in theory it should work. First, the DICOM images need to be converted into .vtk format using VTK; once they have been converted, they can then be converted into .pcd (point cloud) format using PCL (the Point Cloud Library).
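A hedged sketch of that idea (untested, like the suggestion above, and skipping the intermediate .vtk file): read the DICOM series with VTK, extract an isosurface so the volume becomes vtkPolyData, then convert the polydata with PCL and save a .pcd. The directory path and the iso value are placeholder assumptions.
#include <vtkSmartPointer.h>
#include <vtkDICOMImageReader.h>
#include <vtkMarchingCubes.h>
#include <pcl/point_types.h>
#include <pcl/io/vtk_lib_io.h>   // pcl::io::vtkPolyDataToPointCloud
#include <pcl/io/pcd_io.h>

int main() {
  // Read the DICOM series from a directory (placeholder path).
  auto reader = vtkSmartPointer<vtkDICOMImageReader>::New();
  reader->SetDirectoryName("/path/to/dicom/series");
  reader->Update();

  // Extract an isosurface at an intensity threshold (e.g. bone in CT); the value is an assumption.
  auto surface = vtkSmartPointer<vtkMarchingCubes>::New();
  surface->SetInputConnection(reader->GetOutputPort());
  surface->SetValue(0, 300.0);
  surface->Update();

  // Convert the resulting polydata vertices to a PCL cloud and save it.
  pcl::PointCloud<pcl::PointXYZ> cloud;
  pcl::io::vtkPolyDataToPointCloud(surface->GetOutput(), cloud);
  pcl::io::savePCDFileBinary("dicom_surface.pcd", cloud);
  return 0;
}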
How to use magnitude and absdiff OpenCV functions to compute distances?
How can I use magnitude and absdiff? I read the explanation in the documentation, but I keep getting errors because I do not understand exactly what the input and output arrays should be. Should they be vector, Mat, or Scalar? I tried a few variants and failed, and the same happened with cartToPolar. Can anyone give me a small snippet, since I didn't find any examples in the documentation? More precisely, I have a vector<Vec4f> lines; that contains the start and end points of 30 lines, and I want to use magnitude to find the length of each line. I learned how to use norm in a for loop, but I would like to use magnitude instead, so I tried: double x; length = magnitude(lines[i][2]-lines[i][0], lines[i][3]-lines[i][1], x); but it doesn't work. I also tried to define x as a one-element array/vector, but I couldn't get it to compile.
You already got how to use norm to compute the distance: Point2f a = ... Point2f b = .. double length = norm(a - b); // NORM_L2, NORM_L1 You can also work on all points at once. You first need to convert the coordinates from vector to matrix form, then it's just simple math: #include <opencv2\opencv.hpp> #include <iostream> using namespace std; using namespace cv; int main() { vector<Vec4f> lines{ { 1, 2, 4, 6 }, { 5, 7, 1, 3 }, { 11, 12, 12, 11 } }; Mat1f coordinates = Mat4f(lines).reshape(1); Mat1f diff_x = coordinates.col(0) - coordinates.col(2); Mat1f diff_y = coordinates.col(1) - coordinates.col(3); cout << "coordinates: \n" << coordinates << "\n\n"; cout << "diff_x: \n" << diff_x << "\n\n"; cout << "diff_y: \n" << diff_y << "\n\n"; cout << endl; // sqrt((x2 - x1)^2 + (y2 - y1)^2) Mat1f euclidean_distance; magnitude(diff_x, diff_y, euclidean_distance); cout << "euclidean_distance: \n" << euclidean_distance << "\n\n"; // abs(x2 - x1) + abs(y2 - y1) Mat1f manhattan_distance = abs(diff_x) + abs(diff_y); cout << "manhattan_distance: \n" << manhattan_distance << "\n\n"; // Another way to compute L1 distance, with absdiff // abs(x2 - x1) + abs(y2 - y1) Mat1f points1 = coordinates(Range::all(), Range(0, 2)); Mat1f points2 = coordinates(Range::all(), Range(2, 4)); Mat1f other_manhattan_distance; absdiff(points1, points2, other_manhattan_distance); other_manhattan_distance = other_manhattan_distance.col(0) + other_manhattan_distance.col(1); cout << "other_manhattan_distance: \n" << other_manhattan_distance << "\n\n"; return 0; }
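Since the question also mentions cartToPolar, here is a small hedged addendum (not part of the original answer): it computes the per-line lengths, and the line angles, from the same kind of difference matrices in a single call.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main() {
  vector<Vec4f> lines{ { 1, 2, 4, 6 }, { 5, 7, 1, 3 } };
  Mat1f coords = Mat4f(lines).reshape(1);   // one row per line: x1 y1 x2 y2
  Mat1f dx = coords.col(2) - coords.col(0);
  Mat1f dy = coords.col(3) - coords.col(1);

  Mat1f lengths, angles;
  cartToPolar(dx, dy, lengths, angles, true);  // lengths = hypot(dx, dy), angles in degrees
  cout << "lengths:\n" << lengths << "\nangles:\n" << angles << endl;
  return 0;
}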