I want to change the loss calculation method in the loss layer when the iteration count reaches a certain number.
To do this, I think I need to get the current learning rate or iteration count, and then use an if statement to decide whether or not to switch the loss calculation method.
You can add a member variable to the Caffe class to store the current learning rate or iteration count, and access it in whatever layer you want.
For example, to get the current iteration count where you need it, you have to make 3 key modifications (simplified here):
In common.hpp:
class Caffe {
public:
static Caffe& Get();
...//Some other public members
// Returns the current iteration count
inline static int current_iter() { return Get().cur_iter_; }
// Sets the current iteration count
inline static void set_cur_iter(int iter) { Get().cur_iter_ = iter; }
protected:
// The variable that stores the current iteration count
int cur_iter_;
...//Some other protected members
};
In solver.cpp:
template <typename Dtype>
void Solver<Dtype>::Step(int iters) {
...
while (iter_ < stop_iter) {
Caffe::set_cur_iter(iter_);
...//Left Operations
}
}
In the place where you want to access the current iteration count:
template <typename Dtype>
void SomeLayer<Dtype>::some_func() {
int current_iter = Caffe::current_iter();
...//Operations you want
}
AFAIK there is no direct access from within a Python layer to the solver's iteration count or the learning rate.
However, you can keep a counter of your own:
import caffe
class IterCounterLossLayer(caffe.Layer):
def setup(self, bottom, top):
# do your setup here...
self.iter_counter = 0 # setup a counter
def reshape(self, bottom, top):
# reshape code here...
# loss output is scalar
top[0].reshape(1)
def forward(self, bottom, top):
if self.iter_counter < 1000:
# some way of computing the loss
# ...
else:
# another way
# ...
self.iter_counter += 1 # increment, you may consider incrementing by bottom[0].shape[0] the batch size...
def backward(self, top, propagate_down, bottom):
if self.iter_counter < 1000:
# gradients need to fit the loss
# ...
else:
# another way
# ...
To get the iteration count, you can use my count_layer as a bottom layer of your custom layer, which gives you the following benefits:
When you finetune from saved weights, the iteration number continues from the weights you saved.
You get a modular implementation.
There is no need to change the existing Caffe code.
train_val.prototxt
layer {
name: "iteration"
top: "iteration"
type: "Count"
}
count_layer.hpp
#ifndef CAFFE_COUNT_LAYER_HPP_
#define CAFFE_COUNT_LAYER_HPP_
#include <vector>
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
class CountLayer : public Layer<Dtype> {
public:
explicit CountLayer(const LayerParameter& param)
: Layer<Dtype>(param), delta_(1) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (this->blobs_.size() > 0) {
LOG(INFO) << "Skipping parameter initialization";
} else {
this->blobs_.resize(1);
this->blobs_[0].reset(new Blob<Dtype>());
if (this->layer_param_.count_param().has_shape()){
this->blobs_[0]->Reshape(this->layer_param_.count_param().shape());
} else{
this->blobs_[0]->Reshape(vector<int>{1, 1});
}
shared_ptr<Filler<Dtype> > base_filler(GetFiller<Dtype>(
this->layer_param_.count_param().base_filler()));
base_filler->Fill(this->blobs_[0].get());
}
top[0]->Reshape(this->blobs_[0]->shape());
string name = this->layer_param().name();
if (name == ""){
name = "Count";
}
if (this->layer_param_.param_size() <= 0){
LOG(INFO) << "Layer " << name << "'s decay_mult has been set to 0";
this->layer_param_.add_param()->set_decay_mult(Dtype(0));
} else if (!this->layer_param_.param(0).has_decay_mult()){
LOG(INFO) << "Layer " << name << "'s decay_mult has been set to 0";
this->layer_param_.mutable_param(0)->set_decay_mult(0);
}
delta_ = Dtype(this->layer_param_.count_param().delta());
// this makes the top start from base and makes finetuning correct
caffe_add_scalar(this->blobs_[0]->count(), -delta_, this->blobs_[0]->mutable_cpu_data());
}
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) { }
virtual inline const char* type() const { return "Count"; }
virtual inline int ExactNumBottomBlobs() const { return 0; }
virtual inline int ExactNumTopBlobs() const { return 1; }
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_add_scalar(this->blobs_[0]->count(), delta_, this->blobs_[0]->mutable_cpu_data());
top[0]->ShareData(*(this->blobs_[0]));
top[0]->ShareDiff(*(this->blobs_[0]));
}
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
}
private:
Dtype delta_;
};
} // namespace caffe
#endif
count_layer.cpp
#include "caffe/layers/count_layer.hpp"
namespace caffe {
INSTANTIATE_CLASS(CountLayer);
REGISTER_LAYER_CLASS(Count);
} // namespace caffe
caffe.proto
optional CountParameter count_param = 666; // add inside message LayerParameter, using any unused field number
...
message CountParameter {
optional BlobShape shape = 1;
optional FillerParameter base_filler = 2; // The filler for the base
optional float delta = 3 [default = 1];
}
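For illustration, a custom layer that takes the "iteration" blob as one of its bottoms could read the current count in its forward pass roughly like this (a minimal sketch in the spirit of the code above; the layer name and the assumption that the count blob is the last bottom are mine, not part of the count_layer code):
template <typename Dtype>
void SomeLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Assumes the "Count" layer's top ("iteration") is wired in as the last bottom blob.
  const int current_iter = static_cast<int>(bottom[bottom.size() - 1]->cpu_data()[0]);
  if (current_iter < 1000) {
    // ... one way of computing the loss
  } else {
    // ... another way
  }
}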
How can I get the position (x, y) of a window created using OpenCV?
I can create a named window: namedWindow("my window");
I can move this window: moveWindow("my window", x, y);
But how can I get the current coordinates of this window?
Also, there is a function void loadWindowParameters("my window");, which, as documented, can:
loads size, location, flags, trackbars value, zoom and panning
location of the window window_name
But where does it load these parameters to? The declaration of this function does not return anything - it only takes the window name and returns void.
Until the feature request for this gets done, if you need this functionality and can't wait, you can implement it yourself. You'll need the OpenCV source code; you'll have to edit a few OpenCV files and rebuild part of OpenCV.
I used the source code for moveWindow() as a model.
Here is what I did:
Add this function to opencv/sources/modules/highgui/src/window_w32.cpp (I added it just below the cvMoveWindow definition):
CV_IMPL void cvGetWindowRect( const char* name, int &x, int &y, int &width, int &height)
{
CV_FUNCNAME( "cvGetWindowRect" );
__BEGIN__;
CvWindow* window;
RECT rect;
if( !name )
CV_ERROR( CV_StsNullPtr, "NULL name" );
window = icvFindWindowByName(name);
if(!window)
EXIT;
GetWindowRect( window->frame, &rect );
x = rect.left;
y = rect.top;
width = rect.right - rect.left;
height = rect.bottom - rect.top;
__END__;
}
and add its declaration in opencv/sources/modules/highgui/include/opencv2/highgui_c.h :
CVAPI(void) cvGetWindowRect( const char* name, int &x, int &y, int &width, int &height);
This alone would allow you to use cvGetWindowRect from C/C++ to get the window rect. But if you want to use the C++ interface or the Python interface (as I do), you can edit two more files:
Add this function to opencv/sources/modules/highgui/src/window.cpp:
void cv::getWindowRect( const String& winname, CV_OUT int &x, CV_OUT int &y, CV_OUT int &width, CV_OUT int &height)
{
cvGetWindowRect(winname.c_str(), x, y, width, height);
}
and add its declaration in opencv/sources/modules/highgui/include/opencv2/highgui.hpp :
CV_EXPORTS_W void getWindowRect( const String& winname, CV_OUT int &x, CV_OUT int &y, CV_OUT int &width, CV_OUT int &height);
Then you'll have to rebuild the opencv_highgui project (I do this on Windows with Visual Studio 2015). If you need the Python bindings, rebuild the opencv_python3 project too. The CV_EXPORTS_W and CV_OUT macros are needed to expose the function and to recognize the output parameters when building the Python bindings. From Python you'll get a 4-tuple as the return value, e.g.:
>>> cv2.getWindowRect("my window")
(1024, 0, 817, 639)
For the Python bindings you'll have to copy the new cv2.cp35-win_amd64.pyd and opencv_highgui300.dll to your Python environment's Lib\site-packages.
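From C++, the new function would be used along these lines (a quick sketch, assuming the edits above have been built into highgui):
#include <opencv2/highgui.hpp>
#include <iostream>
int main() {
    cv::namedWindow("my window");
    cv::moveWindow("my window", 100, 50);
    int x, y, width, height;
    cv::getWindowRect("my window", x, y, width, height);  // the function added above
    std::cout << x << " " << y << " " << width << " " << height << std::endl;
    return 0;
}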
I'm confused by the OpenCV Mat element types. This is from the docs:
There is a limited fixed set of primitive data types the library can operate on.
That is, array elements should have one of the following types:
8-bit unsigned integer (uchar)
8-bit signed integer (schar)
16-bit unsigned integer (ushort)
16-bit signed integer (short)
32-bit signed integer (int)
32-bit floating-point number (float)
64-bit floating-point number (double)
...
For these basic types, the following enumeration is applied:
enum { CV_8U=0, CV_8S=1, CV_16U=2, CV_16S=3, CV_32S=4, CV_32F=5, CV_64F=6 };
It's known that the C++ standard doesn't define the size of the basic types in bytes, so how can they make such assumptions? And what type should I expect from, let's say, CV_32S: is it int32_t or int?
Developing from Miki's answer,
In OpenCV 3 the definition has moved to modules/core/include/opencv2/core/traits.hpp, where you can find:
/** @brief A helper class for cv::DataType
The class is specialized for each fundamental numerical data type supported by OpenCV. It provides
DataDepth<T>::value constant.
*/
template<typename _Tp> class DataDepth
{
public:
enum
{
value = DataType<_Tp>::depth,
fmt = DataType<_Tp>::fmt
};
};
template<int _depth> class TypeDepth
{
enum { depth = CV_USRTYPE1 };
typedef void value_type;
};
template<> class TypeDepth<CV_8U>
{
enum { depth = CV_8U };
typedef uchar value_type;
};
template<> class TypeDepth<CV_8S>
{
enum { depth = CV_8S };
typedef schar value_type;
};
template<> class TypeDepth<CV_16U>
{
enum { depth = CV_16U };
typedef ushort value_type;
};
template<> class TypeDepth<CV_16S>
{
enum { depth = CV_16S };
typedef short value_type;
};
template<> class TypeDepth<CV_32S>
{
enum { depth = CV_32S };
typedef int value_type;
};
template<> class TypeDepth<CV_32F>
{
enum { depth = CV_32F };
typedef float value_type;
};
template<> class TypeDepth<CV_64F>
{
enum { depth = CV_64F };
typedef double value_type;
};
In most cases/compilers you should be fine using the C++ exact-width data types. You won't have problems with the single-byte data types (CV_8U -> uint8_t and CV_8S -> int8_t), as they are unambiguously defined in C++. The same goes for float (32-bit) and double (64-bit). However, it is true that for the other data types, to be completely sure you use the correct type (for example when using the at<> method), you should use something like:
typedef TypeDepth<CV_WHATEVER_YOU_USED_TO_CREATE_YOUR_MAT>::value_type access_type;
myMat.at<access_type>(y,x) = 0;
As a side note, I am surprised they decided to take such an ambiguous approach, instead of simply using exact data types.
Therefore, regarding your last question:
What type should I expect from, let's say, CV_32S?
I believe the most precise answer, in OpenCV 3, is:
TypeDepth<CV_32S>::value_type
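For example, a compile-time check along these lines (a sketch, assuming the OpenCV 3 traits.hpp shown above and a C++11 compiler) confirms that mapping:
#include <opencv2/core.hpp>
#include <type_traits>
// Fails to compile if CV_32S ever stops being accessed as int with these headers.
static_assert(std::is_same<cv::TypeDepth<CV_32S>::value_type, int>::value,
              "CV_32S maps to int");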
In core.hpp you can find the following:
/*!
A helper class for cv::DataType
The class is specialized for each fundamental numerical data type supported by OpenCV.
It provides DataDepth<T>::value constant.
*/
template<typename _Tp> class DataDepth {};
template<> class DataDepth<bool> { public: enum { value = CV_8U, fmt=(int)'u' }; };
template<> class DataDepth<uchar> { public: enum { value = CV_8U, fmt=(int)'u' }; };
template<> class DataDepth<schar> { public: enum { value = CV_8S, fmt=(int)'c' }; };
template<> class DataDepth<char> { public: enum { value = CV_8S, fmt=(int)'c' }; };
template<> class DataDepth<ushort> { public: enum { value = CV_16U, fmt=(int)'w' }; };
template<> class DataDepth<short> { public: enum { value = CV_16S, fmt=(int)'s' }; };
template<> class DataDepth<int> { public: enum { value = CV_32S, fmt=(int)'i' }; };
// this is temporary solution to support 32-bit unsigned integers
template<> class DataDepth<unsigned> { public: enum { value = CV_32S, fmt=(int)'i' }; };
template<> class DataDepth<float> { public: enum { value = CV_32F, fmt=(int)'f' }; };
template<> class DataDepth<double> { public: enum { value = CV_64F, fmt=(int)'d' }; };
template<typename _Tp> class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; };
You can see that CV_32S is the value for the type int, not int32_t.
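A quick way to see the same mapping from code (a sketch against the core.hpp shown above):
#include <opencv2/core/core.hpp>
#include <iostream>
int main() {
    // With these headers, DataDepth<int>::value is CV_32S (both print 4).
    std::cout << cv::DataDepth<int>::value << " == " << CV_32S << std::endl;
    return 0;
}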
While C++ doesn't define the size of these types, the question is mostly hypothetical: on the systems OpenCV runs on, the sizes are known. Given
cv::Mat m(32, 32, CV_32SC1, cv::Scalar(0));
std::cout << "size of the element in bytes: " << m.elemSize1() << std::endl;
std::cout << "or " << m.step.p[m.dims - 1] / m.channels() << std::endl;
So how can you be sure it is int?
An attempt to call
int pxVal = m.at<int>(0,0);
will
CV_DbgAssert( elemSize()==sizeof(int) );
where the left-hand side is derived from cv::Mat::flags -- in this example the element size of the predefined CV_32SC1 type is 4 bytes, so the check effectively becomes
CV_DbgAssert( 4 == sizeof(int) )
So if that succeeds, the only thing you are left with is endianness, and that was checked when cvconfig.h was generated (by CMake).
TL;DR, expect the types given in the header and you'll be fine.
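Put differently, the takeaway can be captured in a tiny runtime check (sketch only):
#include <opencv2/core/core.hpp>
int main() {
    cv::Mat m(32, 32, CV_32SC1, cv::Scalar(0));
    CV_Assert(m.elemSize() == sizeof(int));  // the same condition at<int>() asserts in debug builds
    m.at<int>(0, 0) = 42;                    // safe: a CV_32S element has the size of int here
    return 0;
}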
You can find all the definitions relevant to your question in OpenCV's sources.
See https://github.com/Itseez/opencv/blob/master/modules/core/include/opencv2/core/cvdef.h file.
I have found several #defines in OpenCV's code related to CV_8UC1, CV_32SC1, etc. To make the enumerations work, OpenCV packs the plain numbers together into a single parameter (i.e. CV_8UC1, CV_16UC2, ... are all represented by their respective numbers), and breaks the depth and channels apart again in the definition of CvMat (I guess Mat may have similar code in its definition). Then it uses create() to allocate space for the matrix. Since create() is inline, I can only guess that it is similar to malloc() or something.
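For example, the packing and unpacking of depth and channel count can be seen with the public macros (a small sketch using the macros from cvdef.h):
#include <opencv2/core/core.hpp>
#include <iostream>
int main() {
    // CV_8UC3 is just CV_MAKETYPE(CV_8U, 3): depth in the low bits, channels above them.
    std::cout << (CV_8UC3 == CV_MAKETYPE(CV_8U, 3)) << std::endl;  // 1
    std::cout << CV_MAT_DEPTH(CV_8UC3) << std::endl;               // 0, i.e. CV_8U
    std::cout << CV_MAT_CN(CV_8UC3) << std::endl;                  // 3 channels
    return 0;
}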
As the source code changed a lot from 2.4.9 to 3.0.0, I will need to post more evidence later. Please allow me a little time to find out more and edit my answer.
In short the table you provided is correct.
If you want to directly access a pixel, you cast it to the type given by the specifier on the right; for example, CV_32S is a signed 32-bit integer.
The S always means a signed integral number (signed char, signed short, signed int)
The F always means a floating point number (float, double)
The U always means an unsigned integral number.
The enumeration is used only when creating or converting a Mat. It's a way of telling the Mat which type is desired; as I understand it, it's a holdover from the C API, from before templates were used.
I use the C functionality exclusively, and in order to create an image, it would be an error to pass the following:
cvCreateImage(mySize,char, nChannels);
Instead, I pass the following:
cvCreateImage(mySize, IPL_DEPTH_8U, nChannels);
Here, IPL_DEPTH_8U is a flag that is used by the function. The function itself has a switch-like statement that checks the flag. The actual value of the flag is usually meaningless, as it is typically handled by conditional statements rather than arithmetic.
Below the email label the user will enter an email, and below the password label the user will enter a password. I am able to draw rectangles, but the upper rectangle should be curved on its upper side and the lower rectangle curved on its lower side.
Here is something I made that I think will do what you want. You just have to add your fields to this Manager. Note that I don't think it compensates for things like margin or padding, so you can either account for that in the paint() method, or just enclose it in another Manager that has the proper padding/margin:
public class GroupFieldManager extends VerticalFieldManager {
private int _rounding;
private int _bgColor;
private int _borderColor;
private boolean _divider;
private int _dividerColor;
public GroupFieldManager(boolean divider, long style) {
super(style);
_rounding = 20;
_bgColor = 0xFFFFFF;
_borderColor = 0xAAAAAA;
_divider = divider;
_dividerColor = 0xAAAAAA;
}
public GroupFieldManager(boolean divider) {
this(divider, 0);
}
public GroupFieldManager() {
this(false, 0);
}
/**
* Sets whether or not to draw a divider
* @param on
*/
public void setDivider(boolean on) {
_divider = on;
}
/**
* Sets the color for the divider (also turns divider on)
* @param color
*/
public void setDividerColor(int color){
_dividerColor = color;
_divider = true;
}
/**
* Sets the background color for the grouping
* @param color
*/
public void setBackgroundColor(int color) {
_bgColor = color;
}
/**
* Sets the border color for the grouping
* @param color
*/
public void setBorderColor(int color) {
_borderColor = color;
}
/**
* Sets the amount of rounding for the border
* @param rounding
*/
public void setRounding(int rounding) {
_rounding = rounding;
}
protected void paint(Graphics graphics) {
int oldColor = graphics.getColor();
//draw the background
graphics.setColor(_bgColor);
graphics.fillRoundRect(0, 0, getWidth(), getHeight(), _rounding, _rounding);
//draw the border
graphics.setColor(_borderColor);
graphics.drawRoundRect(0, 0, getWidth(), getHeight(), _rounding, _rounding);
//draw dividers
if(_divider) {
graphics.setColor(_dividerColor);
int y = 0;
// go through each field, figure out its height, and draw a line under it
for(int i=0;i<getFieldCount();i++) {
if(i != getFieldCount() - 1) {
int height = getField(i).getHeight();
y += height;
graphics.drawLine(0, y, getWidth(), y);
}
}
}
graphics.setColor(oldColor);
super.paint(graphics);
}
}
Since the available methods only draw rectangles with all four corners rounded, you can use a clipping rectangle to cut the drawing off at a straight line, i.e. you actually paint a larger rounded rect but clip away its lower (or upper) part.
When my Floating-Point Guide was published on Slashdot yesterday, I got a lot of flak for my suggested comparison function, which was indeed inadequate. So I finally did the sensible thing and wrote a test suite to see whether I could get all the cases to pass. Here is my result so far. I wonder if this is really as good as one can get with a generic (i.e. not application-specific) float comparison function, or whether I still missed some edge cases.
(Code updated to fix error)
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
* Test suite to demonstrate a good method for comparing floating-point values using an epsilon. Run via JUnit 4.
*
* Note: this function attempts a "one size fits all" solution. There may be some edge cases for which it still
* produces unexpected results, and some of the tests it was developed to pass probably specify behaviour that is
* not appropriate for some applications. Before using it, make sure it's appropriate for your application!
*
* From http://floating-point-gui.de
*
* @author Michael Borgwardt
*/
public class NearlyEqualsTest {
public static boolean nearlyEqual(float a, float b, float epsilon) {
final float absA = Math.abs(a);
final float absB = Math.abs(b);
final float diff = Math.abs(a - b);
if (a * b == 0) { // a or b or both are zero
// relative error is not meaningful here
return diff < (epsilon * epsilon);
} else { // use relative error
return diff / (absA + absB) < epsilon;
}
}
public static boolean nearlyEqual(float a, float b) {
return nearlyEqual(a, b, 0.000001f);
}
/** Regular large numbers - generally not problematic */
@Test
public void big() {
assertTrue(nearlyEqual(1000000f, 1000001f));
assertTrue(nearlyEqual(1000001f, 1000000f));
assertFalse(nearlyEqual(10000f, 10001f));
assertFalse(nearlyEqual(10001f, 10000f));
}
/** Negative large numbers */
@Test
public void bigNeg() {
assertTrue(nearlyEqual(-1000000f, -1000001f));
assertTrue(nearlyEqual(-1000001f, -1000000f));
assertFalse(nearlyEqual(-10000f, -10001f));
assertFalse(nearlyEqual(-10001f, -10000f));
}
/** Numbers around 1 */
@Test
public void mid() {
assertTrue(nearlyEqual(1.0000001f, 1.0000002f));
assertTrue(nearlyEqual(1.0000002f, 1.0000001f));
assertFalse(nearlyEqual(1.0002f, 1.0001f));
assertFalse(nearlyEqual(1.0001f, 1.0002f));
}
/** Numbers around -1 */
@Test
public void midNeg() {
assertTrue(nearlyEqual(-1.000001f, -1.000002f));
assertTrue(nearlyEqual(-1.000002f, -1.000001f));
assertFalse(nearlyEqual(-1.0001f, -1.0002f));
assertFalse(nearlyEqual(-1.0002f, -1.0001f));
}
/** Numbers between 1 and 0 */
@Test
public void small() {
assertTrue(nearlyEqual(0.000000001000001f, 0.000000001000002f));
assertTrue(nearlyEqual(0.000000001000002f, 0.000000001000001f));
assertFalse(nearlyEqual(0.000000000001002f, 0.000000000001001f));
assertFalse(nearlyEqual(0.000000000001001f, 0.000000000001002f));
}
/** Numbers between -1 and 0 */
@Test
public void smallNeg() {
assertTrue(nearlyEqual(-0.000000001000001f, -0.000000001000002f));
assertTrue(nearlyEqual(-0.000000001000002f, -0.000000001000001f));
assertFalse(nearlyEqual(-0.000000000001002f, -0.000000000001001f));
assertFalse(nearlyEqual(-0.000000000001001f, -0.000000000001002f));
}
/** Comparisons involving zero */
@Test
public void zero() {
assertTrue(nearlyEqual(0.0f, 0.0f));
assertTrue(nearlyEqual(0.0f, -0.0f));
assertTrue(nearlyEqual(-0.0f, -0.0f));
assertFalse(nearlyEqual(0.00000001f, 0.0f));
assertFalse(nearlyEqual(0.0f, 0.00000001f));
assertFalse(nearlyEqual(-0.00000001f, 0.0f));
assertFalse(nearlyEqual(0.0f, -0.00000001f));
assertTrue(nearlyEqual(0.0f, 0.00000001f, 0.01f));
assertTrue(nearlyEqual(0.00000001f, 0.0f, 0.01f));
assertFalse(nearlyEqual(0.00000001f, 0.0f, 0.000001f));
assertFalse(nearlyEqual(0.0f, 0.00000001f, 0.000001f));
assertTrue(nearlyEqual(0.0f, -0.00000001f, 0.1f));
assertTrue(nearlyEqual(-0.00000001f, 0.0f, 0.1f));
assertFalse(nearlyEqual(-0.00000001f, 0.0f, 0.00000001f));
assertFalse(nearlyEqual(0.0f, -0.00000001f, 0.00000001f));
}
/** Comparisons of numbers on opposite sides of 0 */
@Test
public void opposite() {
assertFalse(nearlyEqual(1.000000001f, -1.0f));
assertFalse(nearlyEqual(-1.0f, 1.000000001f));
assertFalse(nearlyEqual(-1.000000001f, 1.0f));
assertFalse(nearlyEqual(1.0f, -1.000000001f));
assertTrue(nearlyEqual(1e10f * Float.MIN_VALUE, -1e10f * Float.MIN_VALUE));
}
/**
* The really tricky part - comparisons of numbers very close to zero.
*/
@Test
public void ulp() {
assertTrue(nearlyEqual(Float.MIN_VALUE, -Float.MIN_VALUE));
assertTrue(nearlyEqual(-Float.MIN_VALUE, Float.MIN_VALUE));
assertTrue(nearlyEqual(Float.MIN_VALUE, 0));
assertTrue(nearlyEqual(0, Float.MIN_VALUE));
assertTrue(nearlyEqual(-Float.MIN_VALUE, 0));
assertTrue(nearlyEqual(0, -Float.MIN_VALUE));
assertFalse(nearlyEqual(0.000000001f, -Float.MIN_VALUE));
assertFalse(nearlyEqual(0.000000001f, Float.MIN_VALUE));
assertFalse(nearlyEqual(Float.MIN_VALUE, 0.000000001f));
assertFalse(nearlyEqual(-Float.MIN_VALUE, 0.000000001f));
assertFalse(nearlyEqual(1e25f * Float.MIN_VALUE, 0.0f, 1e-12f));
assertFalse(nearlyEqual(0.0f, 1e25f * Float.MIN_VALUE, 1e-12f));
assertFalse(nearlyEqual(1e25f * Float.MIN_VALUE, -1e25f * Float.MIN_VALUE, 1e-12f));
assertTrue(nearlyEqual(1e25f * Float.MIN_VALUE, 0.0f, 1e-5f));
assertTrue(nearlyEqual(0.0f, 1e25f * Float.MIN_VALUE, 1e-5f));
assertTrue(nearlyEqual(1e20f * Float.MIN_VALUE, -1e20f * Float.MIN_VALUE, 1e-5f));
}
}
The main problem I see is that you don't allow the user to control epsilon.
Also, the appropriate epsilon changes depending on the order of magnitude of the numbers being compared: near zero it is small, near the largest magnitudes it is large.
I think whenever you need to talk about concepts such as "close enough", it becomes an application-level design decision. You can't write a generic library for that.
After sleeping on it, I've realized that this part was rubbish:
if (a*b==0) {
return diff < Float.MIN_VALUE / epsilon;
This becomes less strict as epsilon gets smaller! A more sensible version:
if (a * b == 0) {
return diff < (epsilon * epsilon);
Still, the two branches of the if are not very consistent with each other. It's much stricter when a or b is very small than when one of them is zero. I'm really starting to think that using integer comparison is an overall better method.