I am using openmp to convert an image from RGB to gray scale.But openmp is not faster than normal, I wonder why? why single thread is faster than multi-threads? How to accelerate the speed?
here is the code:
#include <iostream>
#include <omp.h>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat image = imread("../family.jpg");
Mat gray(image.rows, image.cols, CV_8UC1);
time_t start, stop;
start = clock();
for (int i = 0; i < image.rows; i++)
for (int j = 0; j < image.cols; j++) {
gray.at<uchar>(i,j) = image.at<cv::Vec3b>(i,j)[0]*0.114
+ image.at<cv::Vec3b>(i,j)[1]*0.587
+ image.at<cv::Vec3b>(i,j)[2]*0.299;
}
stop = clock() - start;
cout << "time: " << stop * 1.0 / CLOCKS_PER_SEC << endl;
start = clock();
omp_set_num_threads(4);
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < image.rows; i++)
for (int j = 0; j < image.cols; j++) {
gray.at<uchar>(i,j) = image.at<cv::Vec3b>(i,j)[0]*0.114
+ image.at<cv::Vec3b>(i,j)[1]*0.587
+ image.at<cv::Vec3b>(i,j)[2]*0.299;
}
}
stop = clock() - start;
cout << "time: " << stop * 1.0 / CLOCKS_PER_SEC << endl;
return 0;
}
result:
normal time: 0.035253
openmp time: 0.069894
Related
I'm trying to create a char matrix using dynamic allocation (char**). It represents a board where the margins are '#' character and in the middle is the ASCII 32 (blank space). When I run the code this massage appear: "Exception thrown at 0x00007FFD9ABF024E (ucrtbased.dll) in myapp.exe: 0xC0000005: Access violation reading location " in some cpp file.
Here's my code:
#include <iostream>
using namespace std;
char** allocateBoard(int n)
{
char** Board = 0;
Board = new char* [n+2];
int i;
for (i = 0; i < n + 2; i++)
{
Board[i] = new char[n * 2 + 2];
}
return Board;
}
void initBoard(char**& Board, int n)
{
int i, j;
for (i = 0; i < n; i++)
{
for (j = 0; j < n * 2; j++)
{
if (i == 0 || i == n - 1) Board[i][j] = '#';
else if (j == 0 || j == n * 2 - 1) Board[i][j] = '#';
else Board[i][j] = 32;
}
}
}
void showBoard(char** Board, int n)
{
int i, j;
for (i = 0; i < n; i++)
{
for (j = 0; j < n * 2; j++)
{
cout << Board[i][j];
}
cout << endl;
}
}
int main()
{
int n = 4;
char** Board = 0;
Board = allocateBoard(n);
initBoard(Board, n);
showBoard(Board, n);
cout << endl;
showBoard(Board, n);
for (int i = 0; i < n * 2 + 4; i++)
{
delete[] Board[i];
}
delete[] Board;
return 0;
}
Does anyone know where is the problem? As a very beginner I can't see where is the mistake. I've allocated more space in the matrix than I'm actually using so I can't figure why this message appears. Is the deallocation the problem?
Thanks!
I'm new in OpenCV, and I want to thresholding the image by myself without using Threshold function in opencv, because the time spend on function threshold is to high for me.
Here is my code:
Mat src = imread("D:\\DataBox\\7.jpg", 0);
for (int i = 0; i < src.cols; i++) {
cout << i << endl;
for (int j = 0; j < src.rows; j++) {
if (src.at<uchar>(i, j) > 70) {
src.at<uchar>(i, j) = 0;
cout << j << endl;
}
else
src.at<uchar>(i, j) = 255;
}
}
but it still says:
"OpenCV Error: Assertion failed (dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && (unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()) && ((((sizeof(size_t)<<28)|0x8442211) >> ((DataType<_Tp>::depth) & ((1 << 3) - 1))*4) & 15) == elemSize1()) in cv::Mat::at, file C:\Program Files\opencv\build\include\opencv2/core/mat.inl.hpp, line 894"
I can print j from 0~719(since the size of the image is 720*960), but as long as the parameter i want to become 2 from 1, the error occurs.
You mixed up rows and cols:
Try this:
Mat src = imread("path_to_image", IMREAD_GRAYSCALE);
for (int i = 0; i < src.rows; i++)
{
//cout << i << endl;
for (int j = 0; j < src.cols; j++)
{
if (src.at<uchar>(i, j) > 70) {
src.at<uchar>(i, j) = 0;
//cout << j << endl;
}
else
src.at<uchar>(i, j) = 255;
}
}
This is, however very unlikely to perform better than OpenCV implementation. You can gain a little speed working on raw pointers, with a little trick to work on continuous data when possible:
#include <opencv2\opencv.hpp>
using namespace cv;
int main()
{
Mat src = imread("D:\\SO\\img\\nice.jpg", IMREAD_GRAYSCALE);
int rows = src.rows;
int cols = src.cols;
if (src.isContinuous())
{
cols = rows * cols;
rows = 1;
}
for (int i = 0; i < rows; i++)
{
uchar* pdata = src.ptr<uchar>(i);
int base = i*cols;
for (int j = 0; j < cols; j++)
{
if (pdata[base + j] > 70)
{
pdata[base + j] = 0;
}
else
{
pdata[base + j] = 255;
}
}
}
return 0;
}
Actually, on my PC my version is a little bit faster than OpenCV one:
Time #HenryChen (ms): 2.83266
Time #Miki (ms): 1.09597
Time #OpenCV (ms): 2.10727
You can test on your PC with the following code, since time depends on many factor, e.g. optimizations enabled in OpenCV:
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat1b src(720,960);
randu(src, 0, 256);
Mat1b src1 = src.clone();
Mat1b src2 = src.clone();
Mat1b src3 = src.clone();
double tic1 = double(getTickCount());
// Method #HenryChen (corrected)
for (int i = 0; i < src1.rows; i++)
{
//cout << i << endl;
for (int j = 0; j < src1.cols; j++)
{
if (src1.at<uchar>(i, j) > 70) {
src1.at<uchar>(i, j) = 0;
//cout << j << endl;
}
else
src1.at<uchar>(i, j) = 255;
}
}
double toc1 = (double(getTickCount()) - tic1) * 1000.0 / getTickFrequency();
cout << "Time #HenryChen (ms): \t" << toc1 << endl;
//-------------------------------------
double tic2 = double(getTickCount());
// Method #Miki
int rows = src2.rows;
int cols = src2.cols;
if (src2.isContinuous())
{
cols = rows * cols;
rows = 1;
}
for (int i = 0; i < rows; i++)
{
uchar* pdata = src2.ptr<uchar>(0);
int base = i*cols;
for (int j = 0; j < cols; j++)
{
pdata[base + j] = (pdata[base + j] > 70) ? uchar(0) : uchar(255);
}
}
double toc2 = (double(getTickCount()) - tic2) * 1000.0 / getTickFrequency();
cout << "Time #Miki (ms): \t" << toc2 << endl;
//-------------------------------------
double tic3 = double(getTickCount());
// Method #OpenCV
threshold(src3, src3, 70, 255, THRESH_BINARY_INV);
double toc3 = (double(getTickCount()) - tic3) * 1000.0 / getTickFrequency();
cout << "Time #OpenCV (ms): \t" << toc3 << endl;
getchar();
return 0;
}
Use test.at<uchar>(cv::Point(i, j)) instead. I always get lost when accessing cv::Mat directly - cv::Point clears it up a little bit.
Anyway, I agree with Miki - it is very unlikely to create a function that performs better that a library one.
I wanted to binarize low quality images and found that the existing solutions or programs which are implementations of global and local binarization techniques such as Sauvola’s method, NiBlack's method etc are not off much use.
I did find a few good papers regarding much better methods like the ones given in the papers:
1) http://www.ski.org/sites/default/files/publications/wacv11-display-reader.pdf#cite.adap-binar
2) https://www.jstage.jst.go.jp/article/elex/1/16/1_16_501/_pdf
But I haven't worked on image processing much before and so I wanted to know how I could proceed to implement it and what knowledge I need to implement these algorithms
I implemented the binarization of the first paper in like 10 minutes (less time than processing the 2nd image) - no guarantee that it's correct, better have a look at the formulas yourself:
int main()
{
//cv::Mat input = cv::imread("../inputData/Lenna.png");
cv::Mat input = cv::imread("../inputData/LongLineColor.jpg");
cv::Mat gray;
cv::cvtColor(input,gray,CV_BGR2GRAY);
cv::Mat binaryImage = cv::Mat::zeros(gray.rows, gray.cols, CV_8UC1);
// binarization:
// TODO: adjust to your application:
int smallWindowSize = 17; // suggested by the paper
int bigWindowSize = 35; // suggested by the paper
// TODO: adjust to your application
double minTau = 10 ;
// create roi relative to (0,0)
cv::Rect roiTemplate1 = cv::Rect(-smallWindowSize/2,-smallWindowSize/2, smallWindowSize, smallWindowSize);
cv::Rect roiTemplate2 = cv::Rect(-bigWindowSize/2,-bigWindowSize/2, bigWindowSize, bigWindowSize);
cv::Rect imgROI = cv::Rect(0,0, gray.cols, gray.rows);
for(int y=0; y<gray.rows; ++y)
{
std::cout << y << std::endl;
for(int x=0; x<gray.cols; ++x)
{
double pixelThreshold = 255;
// small roi
cv::Rect cROIs = roiTemplate1 + cv::Point(x,y);
// test whether ROI is inside the image. Reduce otherwise:
cROIs = cROIs & imgROI;
if(cROIs.width == 0 || cROIs.height == 0)
continue; // ignore this pixel
// large roi
cv::Rect cROIl = roiTemplate2 + cv::Point(x,y);
cROIl = cROIl & imgROI;
if(cROIl.width == 0 || cROIl.height == 0)
continue; // ignore this pixel
cv::Mat subSmall = gray(cROIs);
cv::Mat subLarge = gray(cROIl);
// evaluate subimages:
// standard deviations
double stdDevS =0;
double stdDevL =0;
// mean value
double meanS =0;
double minL =DBL_MAX;
double meanL =0;
// mean of small region
for(int j=0; j<subSmall.rows; ++j)
for(int i=0; i<subSmall.cols; ++i)
{
meanS += subSmall.at<unsigned char>(j,i);
}
meanS = meanS/ (double)(subSmall.cols*subSmall.rows);
// stddev of small region
for(int j=0; j<subSmall.rows; ++j)
for(int i=0; i<subSmall.cols; ++i)
{
double diff = subSmall.at<unsigned char>(j,i) - meanS;
stdDevS += diff*diff;
}
stdDevS = sqrt(stdDevS/(double)(subSmall.cols*subSmall.rows));
// mean and min of large region
for(int j=0; j<subLarge.rows; ++j)
for(int i=0; i<subLarge.cols; ++i)
{
if(subLarge.at<unsigned char>(j,i) < minL)
{
minL = subLarge.at<unsigned char>(j,i);
meanL += subLarge.at<unsigned char>(j,i);
}
}
meanL = meanL/ (double)(subLarge.cols*subLarge.rows);
// stddef of large region
for(int j=0; j<subLarge.rows; ++j)
for(int i=0; i<subLarge.cols; ++i)
{
double diff = subLarge.at<unsigned char>(j,i) - meanL;
stdDevL += diff*diff;
}
stdDevL = sqrt(stdDevL/(double)(subLarge.cols*subLarge.rows));
// formula (2)
double tau = ((meanS - minL) * (1-stdDevS/stdDevL))/2.0;
// minimum
if(tau < minTau) tau = minTau;
// formula (1)
double Threshold = meanS - tau;
// for debugging:
/*
std::cout << " meanS:" << meanS << std::endl;
std::cout << " std S:" << stdDevS << std::endl;
std::cout << " min L:" << minL << std::endl;
std::cout << " meanL:" << meanL << std::endl;
std::cout << " std L:" << stdDevL << std::endl;
std::cout << " threshold: " << Threshold << std::endl;
*/
unsigned char pixelVal = gray.at<unsigned char>(y,x);
if(pixelVal >= Threshold)
binaryImage.at<unsigned char>(y,x) = 255;
else
binaryImage.at<unsigned char>(y,x) = 0;
}
}
cv::imshow("input", input);
cv::imshow("binary", binaryImage);
//cv::imwrite("../outputData/binaryCustom.png", binaryImage);
cv::waitKey(0);
return 0;
}
giving me these results:
and
It is very slow but not optimized or encapsulated at all ;)
And the results aren't sooo good imho. Probably you have to adjust the windowSizes to your application/task/objectSize
I have a trivial problem but I don't know how to solve it. I just wanna do a simple "foreach" of a Mat to view rgb values. I have next code:
for(int i=0; i<mat.rows; i++)
{
for(int j=0; j<mat.cols; j++)
{
int value_rgb = mat.at<uchar>(i,j);
cout << "(" << i << "," << j << ") : " << value_rgb <<endl;
}
}
The mat is 200 rows x 200 cols. When I print on console the results, just in the final the programs fails with next error:
**OpenCV Error: Assertion failed (dims <= 2 && data && (unsigned)i0 <(unsigned)size.p[0] && (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && ((((sizeof(size_t)<<28)|0x8442211) >> ((DataType<_Tp>::depth) & ((1 << 3) - 1))*4) & 1 5) == elemSize1()) in unknown function, file c:\opencv\build\include\opencv2\core\mat.hpp, line 537**
Anyone can help me?
Thanks.
The below piece of code will help you in accessing the rgb pixel values.You have to access three channels to view RGB values.
for(int i = 0; i < i<mat.rows; i++)
{
for(int j = 0; j < mat.cols; j++)
{
int b = mat.at<cv::Vec3b>(i,j)[0];
int g = mat.at<cv::Vec3b>(i,j)[1];
int r = mat.at<cv::Vec3b>(i,j)[2];
cout << r << " " << g << " " << b << value_rgb <<endl ;
}
}
To read pixel value from a grayscale image
#include <opencv\cv.h>
#include <highgui\highgui.hpp>
using namespace std;
using namespace cv;
int main()
{
cv::Mat img = cv::imread("5.jpg",0);
for(int j=0;j<img.rows;j++)
{
for (int i=0;i<img.cols;i++)
{
int a;
a=img.at<uchar>(j,i);
cout<<a<<endl;
}
}
cv::imshow("After",img);
waitKey(0);
}
Updated
This code reads all the grayscale values from an image and results in frequent occurring vales (Number of times the value as occurred). i.e
Number of times pixel value '0' as appeared,
Number of times pixel value '1' as appeared, ... & so on till 256.
#include <opencv\cv.h>
#include <highgui\highgui.hpp>
using namespace std;
using namespace cv;
int main()
{
cv::Mat img = cv::imread("5.jpg",0);
//for(int j=0;j<img.rows;j++)
//{
// for (int i=0;i<img.cols;i++)
// {
// int a;
// a=img.at<uchar>(j,i);
// cout<<a<<endl;
// }
//}
vector<int> values_rgb;
for(int i=0; i<20; i++)
{
for(int j=0; j<20; j++)
{
int value_rgb = img.at<uchar>(i,j);
values_rgb.push_back(value_rgb);
//cout << "(" << i << "," << j << ") : " << value_rgb <<endl;
}
}
// Sorting of values in ascending order
vector<int> counter_rg_values;
for(int l=0; l<256; l++)
{
for(int k=0; k<values_rgb.size(); k++)
{
if(values_rgb.at(k) == l)
{
counter_rg_values.push_back(l);
}
}
}
//for(int m=0;m<counter_rg_values.size();m++)
//cout<<m<<" "<< counter_rg_values[m] <<endl;
int m=0;
for(int n=0;n<256;n++)
{
int c=0;
for(int q=0;q<counter_rg_values.size();q++)
{
if(n==counter_rg_values[q])
{
//int c;
c++;
m++;
}
}
cout<<n<<"= "<< c<<endl;
}
cout<<"Total number of elements "<< m<<endl;
cv::imshow("After",img);
waitKey(0);
}
I am using NS3 (v3.13) Wi-Fi model in infrastructure topology configured as follows (simulation file attached):
Single AP (BSS)
Multiple STAs (stations)
Application duration = 10s
Saturated downlink traffic (OnOffApplication with OnTime=2s and OffTime=0) from AP to all STAs
Phy: 802.11a
Default YansWifiChannelHelper and YansWifiPhyHelper
Rate control: ConstantRateWifiManager
Mobility: ConstantPositionMobilityModel (STAs are positioned on a circle of 2 meters radius around the AP)
Although all is going well, for a high bitrate (saturated traffic), when the number of STAs per BSS increases a lot, some STAs don't receive any BYTE !!
Experiments:
OnOffApplication DataRate = 60Mb/s, Phy DataMode=OfdmRate54Mbps and 30 STAs, one STA receives packets with a bitrate of 7.2Mb/s and another with 15.3Mb/s (all other 28 STAs don't receive any BYTE)
OnOffApplication DataRate = 60Mb/s, DataMode=OfdmRate6Mbps and 30 STAs, one STA receives packets with a bitrate of 1.95Mb/s and another with 4.3Mb/s (all other 28 STAs don't receive any BYTE)
I think that the problem comes from the OnOff Application configurations; how should I configure it to simulate a full buffer downlink traffic?
Thanks in advance for any suggestion.
#include "ns3/core-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/network-module.h"
#include "ns3/applications-module.h"
#include "ns3/wifi-module.h"
#include "ns3/mobility-module.h"
#include "ns3/csma-module.h"
#include "ns3/internet-module.h"
#include "ns3/flow-monitor-helper.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/applications-module.h"
#include "ns3/internet-module.h"
#include "ns3/gnuplot.h"
#include "ns3/constant-velocity-helper.h"
#include "ns3/integer.h"
#include "ns3/mpi-interface.h"
#include "math.h"
#include <iostream>
/**
* PARAMETERS
*/
#define StaNb 30
#define Distance 2
#define Duration 10
#define DataRate 90000000
#define PacketSize 1500
#define couleur(param) printf("\033[%sm",param)
using namespace ns3;
class Experiment {
public:
Experiment();
void CreateArchi(void);
void CreateApplis();
private:
Ptr<ListPositionAllocator> positionAllocAp;
Ptr<ListPositionAllocator> positionAllocSta;
Ptr<GridPositionAllocator> positionAllocStaCouloir;
Ptr<RandomDiscPositionAllocator> positionAllocStaAmphi;
std::vector<Ptr<ConstantPositionMobilityModel> > constant;
NodeContainer m_wifiAP, m_wifiQSta;
NetDeviceContainer m_APDevice;
NetDeviceContainer m_QStaDevice;
YansWifiChannelHelper m_channel;
Ptr<YansWifiChannel> channel;
YansWifiPhyHelper m_phyLayer_Sta, m_phyLayer_AP;
WifiHelper m_wifi;
QosWifiMacHelper m_macSta, m_macAP;
InternetStackHelper m_stack;
Ipv4InterfaceContainer m_StaInterface;
Ipv4InterfaceContainer m_ApInterface;
Ssid m_ssid;
};
Experiment::Experiment() {
positionAllocStaCouloir = CreateObject<GridPositionAllocator>();
positionAllocAp = CreateObject<ListPositionAllocator>();
positionAllocSta = CreateObject<ListPositionAllocator>();
positionAllocStaAmphi = CreateObject<RandomDiscPositionAllocator>();
m_wifi = WifiHelper::Default();
constant.resize(StaNb + 1);
for (int i = 0; i < StaNb + 1; i++) {
constant[i] = CreateObject<ConstantPositionMobilityModel>();
}
}
void Experiment::CreateArchi(void) {
m_wifiQSta.Create(StaNb);
m_wifiAP.Create(1);
m_ssid = Ssid("BSS_circle");
m_channel = YansWifiChannelHelper::Default();
channel = m_channel.Create();
m_wifi.SetStandard(WIFI_PHY_STANDARD_80211a);
m_wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "DataMode",
StringValue("OfdmRate6Mbps"));
m_phyLayer_Sta = YansWifiPhyHelper::Default();
m_phyLayer_AP = YansWifiPhyHelper::Default();
m_phyLayer_Sta.SetChannel(channel);
m_phyLayer_AP.SetChannel(channel);
positionAllocAp->Add(Vector3D(0.0, 0.0, 0.0));
MobilityHelper mobilityAp;
mobilityAp.SetPositionAllocator(positionAllocAp);
mobilityAp.SetMobilityModel("ns3::ConstantPositionMobilityModel");
mobilityAp.Install(m_wifiAP.Get(0));
constant[0]->SetPosition(Vector3D(0.0, 0.0, 0.0));
float deltaAngle = 2 * M_PI / StaNb;
float angle = 0.0;
double x = 0.0;
double y = 0.0;
for (int i = 0; i < StaNb; i++) {
x = cos(angle) * Distance;
y = sin(angle) * Distance;
positionAllocSta->Add(Vector3D(x, y, 0.0));
MobilityHelper mobilitySta;
mobilitySta.SetPositionAllocator(positionAllocSta);
mobilitySta.SetMobilityModel("ns3::ConstantPositionMobilityModel");
mobilitySta.Install(m_wifiQSta.Get(i));
constant[i]->SetPosition(Vector3D(x, y, 0.0));
angle += deltaAngle;
}
m_macSta = QosWifiMacHelper::Default();
m_macSta.SetType("ns3::StaWifiMac", "ActiveProbing", BooleanValue(true),
"Ssid", SsidValue(m_ssid));
m_macAP = QosWifiMacHelper::Default();
m_macAP.SetType("ns3::ApWifiMac", "Ssid", SsidValue(m_ssid),
"BeaconInterval", TimeValue(Time(std::string("100ms"))));
m_APDevice.Add(m_wifi.Install(m_phyLayer_AP, m_macAP, m_wifiAP));
for (int i = 0; i < StaNb; i++) {
m_QStaDevice.Add(
m_wifi.Install(m_phyLayer_Sta, m_macSta, m_wifiQSta.Get(i)));
}
m_stack.Install(m_wifiAP);
m_stack.Install(m_wifiQSta);
Ipv4AddressHelper address;
address.SetBase("192.168.1.0", "255.255.255.0");
m_ApInterface.Add(address.Assign(m_APDevice.Get(0)));
for (int i = 0; i < StaNb; i++) {
m_StaInterface.Add(address.Assign(m_QStaDevice.Get(i)));
}
Ipv4GlobalRoutingHelper::PopulateRoutingTables();
}
void Experiment::CreateApplis() {
ApplicationContainer source;
OnOffHelper onoff("ns3::UdpSocketFactory", Address());
onoff.SetAttribute("OnTime", RandomVariableValue(ConstantVariable(2)));
onoff.SetAttribute("OffTime", RandomVariableValue(ConstantVariable(0)));
onoff.SetAttribute("DataRate", StringValue("500kb/s"));
for (int i = 0; i < StaNb; i++) {
AddressValue remoteAddress(
InetSocketAddress(m_StaInterface.GetAddress(i), 5010));
onoff.SetAttribute("Remote", remoteAddress);
source.Add(onoff.Install(m_wifiAP.Get(0)));
source.Start(Seconds(3.0));
source.Stop(Seconds(Duration));
}
ApplicationContainer sinks;
PacketSinkHelper packetSinkHelper("ns3::UdpSocketFactory",
Address(InetSocketAddress(Ipv4Address::GetAny(), 5010)));
for (int i = 0; i < StaNb; i++) {
sinks.Add(packetSinkHelper.Install(m_wifiQSta.Get(i)));
sinks.Start(Seconds(3.0));
sinks.Stop(Seconds(Duration));
}
}
int main(int argc, char *argv[]) {
Experiment exp = Experiment();
Config::SetDefault("ns3::WifiRemoteStationManager::RtsCtsThreshold",
StringValue("2346"));
exp.CreateArchi();
exp.CreateApplis();
FlowMonitorHelper flowmon;
Ptr<FlowMonitor> monitor = flowmon.InstallAll();
Simulator::Stop(Seconds(Duration));
Simulator::Run();
monitor->CheckForLostPackets();
Ptr<Ipv4FlowClassifier> classifier = DynamicCast<Ipv4FlowClassifier>(
flowmon.GetClassifier());
std::map<FlowId, FlowMonitor::FlowStats> stats = monitor->GetFlowStats();
int c = 0;
for (std::map<FlowId, FlowMonitor::FlowStats>::const_iterator i =
stats.begin(); i != stats.end(); ++i) {
Ipv4FlowClassifier::FiveTuple t = classifier->FindFlow(i->first);
std::cout << "Flux " << i->first << " (" << t.sourceAddress << " -> "
<< t.destinationAddress << ")\n";
std::cout << " Tx Bytes : " << i->second.txBytes << "\n";
std::cout << " Rx Bytes : " << i->second.rxBytes << "\n";
couleur("33");
std::cout << " Bitrate : "
<< i->second.rxBytes * 8.0
/ (i->second.timeLastRxPacket.GetSeconds()
- i->second.timeFirstRxPacket.GetSeconds())
/ 1000000 << " Mbps\n\n";
couleur("0");
if (i->second.rxBytes > 0)
c++;
}
std::cout << " Number of receiving nodes : " << c << "\n";
Simulator::Destroy();
}
I think the medium is too busy.
You need to tuning down onoff datarate e.g 1 mbps. Practically, full buffer 720p video only need no more than 1mbps
You may also check tracing using pcap, ascii or netanim too see either the packet dropping, packet never being send or bug in your code