Lucene.net proximity search - geolocation

Does anybody have any experience with having Lucene.Net index latitude and longitude values and then return an ordered set of results based on distance from a single point?
Will the Lucene.Net.Spatial library help me at all with this?

A little late to the party but yes, the Spatial library is the place to start with this. The basics behind it are to:
1) Add Lat and Long fields to your document
doc.Add(new Field("Latitude",
NumericUtils.DoubleToPrefixCoded(Latitude),
Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.Add(new Field("Longitude",
NumericUtils.DoubleToPrefixCoded(Longitude),
Field.Store.YES, Field.Index.NOT_ANALYZED));
2) Create plotters for each tier of granularity that your search needs to support
IProjector projector = new SinusoidalProjector();
var ctp = new CartesianTierPlotter(0, projector, Fields.LocationTierPrefix);
StartTier = ctp.BestFit(MaxKms);
EndTier = ctp.BestFit(MinKms);

Plotters = new Dictionary<int, CartesianTierPlotter>();
for (var tier = StartTier; tier <= EndTier; tier++)
{
    Plotters.Add(tier, new CartesianTierPlotter(tier, projector,
                                                Fields.LocationTierPrefix));
}
3) Use your plotters to index your document for each tier
private static void AddCartesianTiers(double latitude, double longitude,
                                      Document document)
{
    for (var tier = StartTier; tier <= EndTier; tier++)
    {
        var ctp = Plotters[tier];
        var boxId = ctp.GetTierBoxId(latitude, longitude);
        document.Add(new Field(ctp.GetTierFieldName(),
                               NumericUtils.DoubleToPrefixCoded(boxId),
                               Field.Store.YES,
                               Field.Index.NOT_ANALYZED_NO_NORMS));
    }
}
With your document indexed you can move on to building a query. This example uses a ConstantScoreQuery, but you can swap that out for your ranged scoring:
/* Builder allows us to build a polygon which we will use to limit
 * search scope on our cartesian tiers, this is like putting a grid
 * over a map */
var builder = new CartesianPolyFilterBuilder(Fields.LocationTierPrefix);

/* Bounding area draws the polygon, this can be thought of as working
 * out which squares of the grid over a map to search */
var boundingArea = builder.GetBoundingArea(Latitude, Longitude,
                                           DistanceInKilometres * KmsToMiles);

/* We refine, this is the equivalent of drawing a circle on the map,
 * within our grid squares, ignoring the parts of the squares we are
 * searching that aren't within the circle - ignoring extraneous corners
 * and such */
var distanceFilter = new LatLongDistanceFilter(boundingArea,
                                               DistanceInKilometres * KmsToMiles,
                                               Latitude,
                                               Longitude,
                                               ProductSearchEngine.Fields.Latitude,
                                               ProductSearchEngine.Fields.Longitude);

/* We add a query stating we will only search against products that have
 * GeoCode information */
var masterQuery = new BooleanQuery();
masterQuery.Add(new TermQuery(new Term(Fields.HasGeoCode, FieldFlags.HasField)),
                BooleanClause.Occur.MUST);

/* Add our filter, this will stream through our results and
 * determine eligibility */
masterQuery.Add(new ConstantScoreQuery(distanceFilter),
                BooleanClause.Occur.MUST);
All of this is taken from a blog post I just wrote whilst looking at a similar problem. You can see it at http://www.leapinggorilla.com/Blog/Read/1005/spatial-search-in-lucenenet
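If you also need the ordered set of results the question asks for, one option is to run the query and sort the hits in memory using the stored lat/long fields. This is a minimal sketch, not from the blog post: it assumes Lucene.Net 3.0-style member names, the "Latitude"/"Longitude" fields indexed above, and a directory plus Latitude/Longitude values available on the enclosing class; the 100-hit cap is arbitrary. NumericUtils.PrefixCodedToDouble is the decoding counterpart of the DoubleToPrefixCoded call used at index time.
// Sketch only: search, then order hits by great-circle distance from
// (Latitude, Longitude). Requires System.Linq.
public IList<Document> SearchOrderedByDistance(Query masterQuery)
{
    var searcher = new IndexSearcher(directory, true);
    TopDocs hits = searcher.Search(masterQuery, 100); // cap is arbitrary
    return hits.ScoreDocs
        .Select(sd => searcher.Doc(sd.Doc))
        .OrderBy(d => DistanceKm(Latitude, Longitude,
                      NumericUtils.PrefixCodedToDouble(d.Get("Latitude")),
                      NumericUtils.PrefixCodedToDouble(d.Get("Longitude"))))
        .ToList();
}

// Spherical law of cosines; 6371 km is the mean earth radius.
private static double DistanceKm(double lat1, double lon1, double lat2, double lon2)
{
    Func<double, double> rad = d => d * Math.PI / 180.0;
    return Math.Acos(Math.Sin(rad(lat1)) * Math.Sin(rad(lat2))
         + Math.Cos(rad(lat1)) * Math.Cos(rad(lat2))
         * Math.Cos(rad(lon2) - rad(lon1))) * 6371.0;
}
For large result sets you would want a proper distance sort (the spatial contrib ships comparators for this), but an in-memory sort over the filtered hits is often good enough.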

Related

How to create helical curve using inventor api and c# or vb.net

I have code in a C# Windows app that creates a line in a 2D sketch, and then I created a 3D sketch. Eventually I want to add a helical curve around this line from the 3D sketch. Can anyone please help me solve this issue? Thanks in advance.
public void MyMethod(Inventor.Application ThisApplication)
{
    PartDocument oSheetMetalDoc = (PartDocument)ThisApplication.Documents.Add(
        DocumentTypeEnum.kPartDocumentObject,
        ThisApplication.FileManager.GetTemplateFile(
            DocumentTypeEnum.kPartDocumentObject,
            SystemOfMeasureEnum.kMetricSystemOfMeasure,
            DraftingStandardEnum.kDefault_DraftingStandard,
            "{9C464203-9BAE-11D3-8BAD-0060B0CE6BB4}"),
        true);

    // Set a reference to the component definition.
    SheetMetalComponentDefinition oCompDef =
        (SheetMetalComponentDefinition)oSheetMetalDoc.ComponentDefinition;

    // Set a reference to the sheet metal features collection.
    SheetMetalFeatures oSheetMetalFeatures = (SheetMetalFeatures)oCompDef.Features;

    // Create a new sketch on the X-Y work plane.
    PlanarSketch oSketch = oCompDef.Sketches.Add(oCompDef.WorkPlanes[3]);
    TransientGeometry oTransGeom = (TransientGeometry)ThisApplication.TransientGeometry;

    // Draw a line from (0,0) to (0,500).
    SketchLine line = (SketchLine)oSketch.SketchLines.AddByTwoPoints(
        oTransGeom.CreatePoint2d(0, 0),
        oTransGeom.CreatePoint2d(0, 500)); // second possibility

    // Turn the sketch line into a centerline.
    line.Centerline = true;
    ThisApplication.ActiveView.GoHome();

    // Create a 3D sketch.
    Sketch3D sketch3 = (Sketch3D)oCompDef.Sketches3D.Add();
    SketchEntity3D selectObj = (SketchEntity3D)ThisApplication.CommandManager.Pick(
        SelectionFilterEnum.kSketch3DCurveFilter, "Select 3d sketch entity");
    if (selectObj == null)
    {
        return;
    }
    // HelicalConstraint3D - want to add a helical curve around the line above
}
This is iLogic/VB.NET code for creating a helix curve based on a selected SketchLine.
The part document must be active, and at least one SketchLine must be visible for selection.
Sub Main()
    Dim oSketchLine As SketchLine = ThisApplication.CommandManager.Pick(
        SelectionFilterEnum.kSketchCurveLinearFilter, "Select line")
    CreateHelicalCurve(oSketchLine)
End Sub

Private Sub CreateHelicalCurve(oSketchLine As SketchLine)
    Dim partDef As PartComponentDefinition = oSketchLine.Parent.Parent
    Dim sketch3D As Sketch3D = partDef.Sketches3D.Add()

    Dim axisStartPoint As Point = oSketchLine.StartSketchPoint.Geometry3d
    Dim axisEndPoint As Point = oSketchLine.EndSketchPoint.Geometry3d

    Dim curveStartPoint As Point = axisStartPoint.Copy()
    curveStartPoint.TranslateBy(ThisApplication.TransientGeometry.CreateVector(0, 0, 1))

    Dim diameter As Double = 5 ' [cm]
    Dim pitch As Double = 1 ' [cm]
    Dim revolution As Object = Nothing ' Optional argument
    Dim height As Double = 5 ' [cm]

    Dim helicalCurveDefinition As HelicalCurveConstantShapeDefinition =
        sketch3D.HelicalCurves.CreateConstantShapeDefinition(
            HelicalShapeDefinitionTypeEnum.kPitchAndHeightShapeType,
            axisStartPoint,
            axisEndPoint,
            curveStartPoint,
            diameter,
            pitch,
            revolution,
            height)

    sketch3D.HelicalCurves.Add(helicalCurveDefinition)
End Sub
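Since the question asks for C#, here is a rough, untested translation of the same calls (the members mirror the VB snippet above; 'app' stands in for your Inventor.Application reference, and Type.Missing may be needed instead of null for the optional revolution argument):
// Hedged C# sketch of the iLogic code above; assumes 'app' is a valid
// Inventor.Application and oSketchLine is a line from a planar sketch.
private void CreateHelicalCurve(Inventor.Application app, SketchLine oSketchLine)
{
    PlanarSketch sketch = (PlanarSketch)oSketchLine.Parent;
    PartComponentDefinition partDef = (PartComponentDefinition)sketch.Parent;
    Sketch3D sketch3D = partDef.Sketches3D.Add();

    Point axisStartPoint = oSketchLine.StartSketchPoint.Geometry3d;
    Point axisEndPoint = oSketchLine.EndSketchPoint.Geometry3d;

    Point curveStartPoint = axisStartPoint.Copy();
    curveStartPoint.TranslateBy(app.TransientGeometry.CreateVector(0, 0, 1));

    double diameter = 5;  // [cm]
    double pitch = 1;     // [cm]
    double height = 5;    // [cm]

    HelicalCurveConstantShapeDefinition definition =
        sketch3D.HelicalCurves.CreateConstantShapeDefinition(
            HelicalShapeDefinitionTypeEnum.kPitchAndHeightShapeType,
            axisStartPoint,
            axisEndPoint,
            curveStartPoint,
            diameter,
            pitch,
            null,   // revolution - optional; Type.Missing may be required
            height);

    sketch3D.HelicalCurves.Add(definition);
}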

Count of the biggest bin in histogram, C#

I want to make a histogram of my data, so I use the Histogram class in C# from MathNet.Numerics.Statistics.
double[] array = { 2, 2, 5,56,78,97,3,3,5,23,34,67,12,45,65 };
Vector<double> data = Vector<double>.Build.DenseOfArray(array);
int binAmount = 3;
Histogram _currentHistogram = new Histogram(data, binAmount);
How can I get the count of the biggest bin? Or just the index of the biggest bin? I tried to get it by using GetBucketOf, but to do this I need an element in that bucket :(
Is there any other way to do this? I read the documentation and searched Google, and I can't find anything.
(Hi, I would use a comment for this but I just joined today and don't yet have 50 reputation to comment!) I just had a look at http://numerics.mathdotnet.com/api/MathNet.Numerics.Statistics/Histogram.htm. That documentation page (the footer says it was built using http://docu.jagregory.com/) shows a public property named Item which returns a Bucket. I'm wondering if that is the property you need, because the automatically generated documentation states that the Item property "Gets the n'th bucket", but it isn't clear how the Item property acts as an indexer. Looking at your code, I would try _currentHistogram.Item[n] first (and if that doesn't work, try _currentHistogram[n]), iterating the Buckets in the histogram with something like:
var countOfBiggest = -1.0; // Bucket.Count is a double
var indexOfBiggest = -1;
for (var n = 0; n < _currentHistogram.BucketCount; n++)
{
    if (_currentHistogram.Item[n].Count > countOfBiggest)
    {
        countOfBiggest = _currentHistogram.Item[n].Count;
        indexOfBiggest = n;
    }
}
The code above assumes that Histogram uses 0-based and not 1-based indexing.
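For completeness, a small sketch of the same scan written against the indexer form, which is how current MathNet.Numerics exposes it (histogram[n] returns a Bucket whose Count is a double; names follow the question's code):
// Requires System.Linq. Finds the index and count of the fullest bucket.
int indexOfBiggest = Enumerable.Range(0, _currentHistogram.BucketCount)
    .OrderByDescending(n => _currentHistogram[n].Count)
    .First();
double countOfBiggest = _currentHistogram[indexOfBiggest].Count;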

Parsing Projection WKT in OpenLayers 3

I am looking for a method (JavaScript, either external to or part of ol3) that can parse a projection WKT into its proj4 text and create the projection.
<SRS>PROJCS["GDA94 / MGA zone 53",GEOGCS["GDA94",DATUM["Geocentric_Datum_of_Australia_1994",SPHEROID["GRS 1980",6378137,298.2572221010002,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6283"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4283"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",135],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","28353"]]</SRS>
Above shows an example of the XML element containing the SRS.
I have found out that if I can convert this to proj4 text, then I can add it as a projection this way:
proj4.defs("EPSG:28353", "+proj=utm +zone=53 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs");
var def = proj4.defs(grid.srs);
var units = def.units;
var proj = new ol.proj.Projection({
code: grid.srs,
units: units,
axisOrientation: def.axis
});
proj.setExtent(/*...*/);
ol.proj.addProjection(proj);
var proj4Transform1 = proj4('EPSG:3857', grid.srs);
ol.proj.addCoordinateTransforms(ol.proj.get('EPSG:3857'), proj,
proj4Transform1.forward, proj4Transform1.inverse);
var proj4Transform2 = proj4('EPSG:4326', grid.srs);
ol.proj.addCoordinateTransforms(ol.proj.get('EPSG:4326'), proj,
proj4Transform2.forward, proj4Transform2.inverse);
Is it possible to find the extent of the projection in the WKT too, or should I look this up externally as well?
proj4js supports WKT strings too. You can throw what you have (excluding the <SRS> tag) directly at proj4js, using the same syntax:
proj4.defs("EPSG:28353", "PROJCS["GDA94 / MGA zone 53",GEOGCS["GDA94",DATUM["Geocentric_Datum_of_Australia_1994",SPHEROID["GRS 1980",6378137,298.2572221010002,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6283"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4283"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",135],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","28353"]]");
Once you have defined a projection using proj4.defs(), it will immediately be available in OpenLayers 3, and all transforms will be registered. So the proj4.defs() line is all you need, unless you want to set additional options, like the projection extent in your case. There are two ways to do this: 1) globally:
ol.proj.get('EPSG:28353').setExtent(/* ... */);
or 2) just for one ol.proj.Projection instance that you use to e.g. configure your ol.View:
var projection = new ol.proj.Projection({
    code: 'EPSG:28353',
    extent: /* ... */
});

How to make a function for atan. I want to get the result manually for SQLite

How can I make a function for atan? It needs to work inside an SQLite query. I need acos, but the formula I found for acos requires atan:
newCos = 2 * atan( sqrt(1-pow(var,2))/(1+var) );
But we need an atan function to run this.
Here is a distance function for SQLite; the acos you need can be handled in SQL using this. It may be helpful.
As part of an iPhone SDK project, I have an sqlite database with a table full of geographic locations, each stored as a latitude and longitude value in degrees. I wanted to be able to perform an SQL SELECT on this table and ORDER BY each row’s distance from an arbitrary point. I’ve achieved this by defining a custom sqlite function. This article contains the code for the function, together with instructions on using it.
Here’s the function, together with a convenience macro to convert from degrees to radians. This function is based on an online distance calculator I found which makes use of the spherical law of cosines.
#define DEG2RAD(degrees) ((degrees) * 0.01745327) // degrees * pi over 180

static void distanceFunc(sqlite3_context *context, int argc, sqlite3_value **argv)
{
    // check that we have four arguments (lat1, lon1, lat2, lon2)
    assert(argc == 4);
    // check that all four arguments are non-null
    if (sqlite3_value_type(argv[0]) == SQLITE_NULL || sqlite3_value_type(argv[1]) == SQLITE_NULL ||
        sqlite3_value_type(argv[2]) == SQLITE_NULL || sqlite3_value_type(argv[3]) == SQLITE_NULL) {
        sqlite3_result_null(context);
        return;
    }
    // get the four argument values
    double lat1 = sqlite3_value_double(argv[0]);
    double lon1 = sqlite3_value_double(argv[1]);
    double lat2 = sqlite3_value_double(argv[2]);
    double lon2 = sqlite3_value_double(argv[3]);
    // convert lat1 and lat2 into radians now, to avoid doing it twice below
    double lat1rad = DEG2RAD(lat1);
    double lat2rad = DEG2RAD(lat2);
    // apply the spherical law of cosines to our latitudes and longitudes, and set the result appropriately
    // 6378.1 is the approximate radius of the earth in kilometres
    sqlite3_result_double(context, acos(sin(lat1rad) * sin(lat2rad) + cos(lat1rad) * cos(lat2rad) * cos(DEG2RAD(lon2) - DEG2RAD(lon1))) * 6378.1);
}
This defines an SQL function distance(Latitude1, Longitude1, Latitude2, Longitude2), which returns the distance (in kilometres) between two points.
To use this function, add the code above to your Xcode project, and then add this line immediately after you call sqlite3_open:
sqlite3_create_function(sqliteDatabasePtr, "distance", 4, SQLITE_UTF8, NULL, &distanceFunc, NULL, NULL);
…where sqliteDatabasePtr is the database pointer returned by your call to sqlite3_open.
Assuming you have a table called Locations, with columns called Latitude and Longitude (both of type double) containing values in degrees, you can then use this function in your SQL like this:
SELECT * FROM Locations ORDER BY distance(Latitude, Longitude, 51.503357, -0.1199)
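As an aside, if you are on .NET rather than the iPhone SDK, the same idea can be sketched with Microsoft.Data.Sqlite's CreateFunction, which registers a scalar function much like sqlite3_create_function does. A minimal sketch; the Locations table and column names are the same assumptions as above:
using System;
using Microsoft.Data.Sqlite;

// Register a scalar "distance" function (spherical law of cosines, km)
// on an open connection, then use it in ORDER BY as shown above.
using var connection = new SqliteConnection("Data Source=locations.db");
connection.Open();
connection.CreateFunction(
    "distance",
    (double lat1, double lon1, double lat2, double lon2) =>
    {
        double d2r = Math.PI / 180.0; // degrees to radians
        return Math.Acos(Math.Sin(lat1 * d2r) * Math.Sin(lat2 * d2r)
             + Math.Cos(lat1 * d2r) * Math.Cos(lat2 * d2r)
             * Math.Cos((lon2 - lon1) * d2r)) * 6378.1; // km
    });
// SELECT * FROM Locations ORDER BY distance(Latitude, Longitude, 51.503357, -0.1199)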
Your question is a bit unclear, but it seems that you know how to write your own custom SQLite functions, meaning that your actual question is about how to write the various trigonometric functions.
You don't need to write them. Simply use the standard math functions.
#import <math.h>
double newCos = cos(someRadianAngle);
See the man page for cos, sin, tan, atan, etc.
Do one thing: pass var1 and var2 to a function, and then use getValue in your query. Xcode has built-in atan and acos functions; you just need to pass values in radians.
float getValue = [self calculatetan:(float)var1 withSecondValue:(float)var2];

- (float)calculatetan:(float)var1 withSecondValue:(float)var2
{
    // note: var2 is unused in the formula as given
    float newCos = 2 * atan(sqrt(1 - pow(var1, 2)) / (1 + var1)); // or split this up
    return newCos;
}

How do I use Emgu CV's Surf library to match to a library of images

Emgu CV's example set has an example on how to use SURFDetector to detect features from an image and then use Features2DTracker's MatchFeature call (which seems to use KNN) to match a "model" image to an "observed" image. This part makes sense.
Now, if I wanted to build a library of images, each using image's SURF features to find the best match for a given image, what are my options? Instead of doing a brute force match with each image in my library, can I build a tree? I'm confused because Emgu seems to be building some sort of tree, but only between two images:
//Create a SURF Tracker using k-d Tree
SURFTracker tracker = new SURFTracker(modelFeatures);
I've read almost every thread on the site on the subject but can't understand how to get started. I also thought about using histogram matching: splitting each RGB channel into bins and comparing the normalized counts. Instead of calculating the Euclidean distance to each image in the library, if I wanted to partition my search space based on RGB counts, that would still mean branching on one of R, G, B, and I'm not sure how to build that decision tree.
I only started reading about this topic a few days ago, so apologies for my naivety.
You could take a look at Emgu CV's TrafficSignRecognition example. It is the same as the SURFFeature example, but applied in real life. It is able to detect whether a given image matches the model image and how many matches there are. I tried it; it is worth a look.
SURFTracker seems to use the FLANN (Fast Library for Approximate Nearest Neighbors) library that comes with OpenCV (so has Emgu bindings, too) while it:
builds a tree from the descriptors extracted from the template image (so that it's faster to match the points of the sample to those of the template). So the tree is built for one image only (the template).
when given a sample, it extracts the descriptors, calculates a match (a pairing between the template and the image descriptors), taking into account the spatial consistency of the matching points, too (right side to right side, left side to left side)
Supposing you'd like to be faster than simply doing the above procedure for every image, you would have to build one tree out of every descriptor for every image, and put that into a FLANN Index while keeping track of which descriptor came from which image (probably in a separate array).
When given an image, you could extract all the descriptors from it, and match them one by one to the FLANN tree (this is faster than doing it with a different tree for every template descriptor collection). So for every descriptor X in the sample, you get a most similar descriptor Y that comes from image Z. These can be used as votes for similar images (see http://en.wikipedia.org/wiki/Bag_of_words_model).
However, this method doesn't take into account the spatial consistency of the points... but it's possible to check that, too, for the top k images we have votes for (k << N, the number of all images in the system).
This code makes a matrix of descriptors for each image, appends them all together, then makes a FLANN index, does a KNN search on it, and then returns the matches. All the code is here:
/// <summary>
/// Main method.
/// </summary>
public IList<IndecesMapping> Match()
{
    string[] dbImages = { "1.jpg", "2.jpg", "3.jpg" };
    string queryImage = "query.jpg";
    IList<IndecesMapping> imap;

    // compute descriptors for each image
    var dbDescsList = ComputeMultipleDescriptors(dbImages, out imap);

    // concatenate all DB images descriptors into single Matrix
    Matrix<float> dbDescs = ConcatDescriptors(dbDescsList);

    // compute descriptors for the query image
    Matrix<float> queryDescriptors = ComputeSingleDescriptors(queryImage);

    FindMatches(dbDescs, queryDescriptors, ref imap);

    return imap;
}
/// <summary>
/// Computes image descriptors.
/// </summary>
/// <param name="fileName">Image filename.</param>
/// <returns>The descriptors for the given image.</returns>
public Matrix<float> ComputeSingleDescriptors(string fileName)
{
    Matrix<float> descs;
    using (Image<Gray, Byte> img = new Image<Gray, byte>(fileName))
    {
        VectorOfKeyPoint keyPoints = detector.DetectKeyPointsRaw(img, null);
        descs = detector.ComputeDescriptorsRaw(img, null, keyPoints);
    }
    return descs;
}
/// <summary>
/// Convenience method for computing descriptors for multiple images.
/// On return imap is filled with structures specifying which descriptor ranges in the concatenated matrix belong to what image.
/// </summary>
/// <param name="fileNames">Filenames of images to process.</param>
/// <param name="imap">List of IndecesMapping to hold descriptor ranges for each image.</param>
/// <returns>List of descriptors for the given images.</returns>
public IList<Matrix<float>> ComputeMultipleDescriptors(string[] fileNames, out IList<IndecesMapping> imap)
{
    imap = new List<IndecesMapping>();
    IList<Matrix<float>> descs = new List<Matrix<float>>();
    int r = 0;
    for (int i = 0; i < fileNames.Length; i++)
    {
        var desc = ComputeSingleDescriptors(fileNames[i]);
        descs.Add(desc);
        imap.Add(new IndecesMapping()
        {
            fileName = fileNames[i],
            IndexStart = r,
            IndexEnd = r + desc.Rows - 1
        });
        r += desc.Rows;
    }
    return descs;
}
/// <summary>
/// Computes 'similarity' value (IndecesMapping.Similarity) for each image in the collection against our query image.
/// </summary>
/// <param name="dbDescriptors">Query image descriptor.</param>
/// <param name="queryDescriptors">Consolidated db images descriptors.</param>
/// <param name="images">List of IndecesMapping to hold the 'similarity' value for each image in the collection.</param>
public void FindMatches(Matrix<float> dbDescriptors, Matrix<float> queryDescriptors, ref IList<IndecesMapping> imap)
{
    var indices = new Matrix<int>(queryDescriptors.Rows, 2); // matrix that will contain indices of the 2-nearest neighbors found
    var dists = new Matrix<float>(queryDescriptors.Rows, 2); // matrix that will contain distances to the 2-nearest neighbors found

    // create FLANN index with 4 kd-trees and perform KNN search over it, looking for 2 nearest neighbours
    var flannIndex = new Index(dbDescriptors, 4);
    flannIndex.KnnSearch(queryDescriptors, indices, dists, 2, 24);

    for (int i = 0; i < indices.Rows; i++)
    {
        // filter out all inadequate pairs based on distance between pairs
        if (dists.Data[i, 0] < (0.6 * dists.Data[i, 1]))
        {
            // find the db image to which the matched db descriptor (indices.Data[i, 0]) belongs and increment its similarity value.
            // in the actual implementation this should be done differently as it's not very efficient for large image collections.
            foreach (var img in imap)
            {
                if (img.IndexStart <= indices.Data[i, 0] && img.IndexEnd >= indices.Data[i, 0])
                {
                    img.Similarity++;
                    break;
                }
            }
        }
    }
}
/// <summary>
/// Concatenates descriptors from different sources (images) into single matrix.
/// </summary>
/// <param name="descriptors">Descriptors to concatenate.</param>
/// <returns>Concatenated matrix.</returns>
public Matrix<float> ConcatDescriptors(IList<Matrix<float>> descriptors)
{
    int cols = descriptors[0].Cols;
    int rows = descriptors.Sum(a => a.Rows);

    float[,] concatedDescs = new float[rows, cols];
    int offset = 0;
    foreach (var descriptor in descriptors)
    {
        // append new descriptors
        Buffer.BlockCopy(descriptor.ManagedArray, 0, concatedDescs, offset, sizeof(float) * descriptor.ManagedArray.Length);
        offset += sizeof(float) * descriptor.ManagedArray.Length;
    }
    return new Matrix<float>(concatedDescs);
}
public class IndecesMapping
{
    public int IndexStart { get; set; }
    public int IndexEnd { get; set; }
    public int Similarity { get; set; }
    public string fileName { get; set; }
}

private const double surfHessianThresh = 300;
private const bool surfExtendedFlag = true;
private SURFDetector detector = new SURFDetector(surfHessianThresh, surfExtendedFlag);
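To rank the library once Match() has run, something like this works (a small sketch assuming the methods above live on the same class; the image with the most votes is the best candidate):
// Requires System.Linq. Rank db images by vote count (Similarity).
var matches = Match();
var best = matches.OrderByDescending(m => m.Similarity).First();
Console.WriteLine("Best match: {0} ({1} votes)", best.fileName, best.Similarity);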
