Cypher results return certain structure - neo4j

I'm trying to return a certain structure.
Here is my query:
MATCH (tracker:tracker { active: true })
OPTIONAL MATCH (tracker { active: true })--(timer:timer)
RETURN { tracker: tracker, timers: COLLECT(timer) } as trackers
Here is what I am returning so far:
{
"results": [{
"columns": ["trackers"],
"data": [{
"row": [{
"tracker": {
"title": "a",
"id": "04e3fddc-5aef-4c3a-9aeb-62a9fb15bd75",
"active": true
},
"timers": []
}]
}]
}],
"errors": []
}
I would like the timers to be nested under the "tracker" with the tracker's properties, like this:
{
"results": [{
"columns": ["trackers"],
"data": [{
"row": [{
"tracker": {
"title": "a",
"id": "04e3fddc-5aef-4c3a-9aeb-62a9fb15bd75",
"active": true,
"timers": []
}
}]
}]
}],
"errors": []
}

Try this:
MATCH (tr:tracker {active: true})
OPTIONAL MATCH (tr)--(ti:timer)
WITH {
title: tr.title,
id: tr.id,
active: tr.active,
timers: COLLECT(ti)
} as trackers
RETURN trackers
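
If you are on Neo4j 3.1 or newer, a map projection can build the same nested shape without listing every property by hand. A sketch using the aliases from the query above:
MATCH (tr:tracker {active: true})
OPTIONAL MATCH (tr)--(ti:timer)
// tr{ .* } copies all of the tracker's properties; timers is added as an extra key
RETURN tr{ .*, timers: COLLECT(ti) } AS trackers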

Related

Rails Searchkick not returning results when I use a where statement

I run
Post.search("daniel")
and get 60+ results.
Post.where(archive: true)
also returns 60+ results.
But
Post.search("daniel", where: { archive: true })
returns 0 results.
Here is the full Searchkick query:
{
"query": {
"bool": {
"must": {
"bool": {
"should": [
{
"dis_max": {
"queries": [
{
"multi_match": {
"query": "daniel",
"boost": 10,
"operator": "and",
"analyzer": "searchkick_search",
"fields": [
"*.analyzed"
],
"type": "best_fields"
}
},
{
"multi_match": {
"query": "daniel",
"boost": 10,
"operator": "and",
"analyzer": "searchkick_search2",
"fields": [
"*.analyzed"
],
"type": "best_fields"
}
},
{
"multi_match": {
"query": "daniel",
"boost": 1,
"operator": "and",
"analyzer": "searchkick_search",
"fuzziness": 1,
"prefix_length": 0,
"max_expansions": 3,
"fuzzy_transpositions": true,
"fields": [
"*.analyzed"
],
"type": "best_fields"
}
},
{
"multi_match": {
"query": "daniel",
"boost": 1,
"operator": "and",
"analyzer": "searchkick_search2",
"fuzziness": 1,
"prefix_length": 0,
"max_expansions": 3,
"fuzzy_transpositions": true,
"fields": [
"*.analyzed"
],
"type": "best_fields"
}
}
]
}
}
]
}
},
"filter": [
{
"term": {
"archive": {
"value": true
}
...
I looked at the Searchkick gem docs and I am following exactly what they describe. The plain search works fine; it only returns 0 posts when I add the where clause.
Without the where clause it returns all the posts that contain "daniel", and it breaks as soon as the where clause is added.
What am I doing wrong here? Is more information needed?
require 'elasticsearch/model'

class Post < ApplicationRecord
  searchkick text_start: [:title]
end
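
One thing worth checking (an assumption, not something confirmed in the post): Searchkick's where clauses can only match attributes that are actually present in the indexed document, so if archive is not part of it the filter will match nothing. A minimal sketch of a search_data method that indexes it, assuming title and archive are the relevant columns:
require 'elasticsearch/model'

class Post < ApplicationRecord
  searchkick text_start: [:title]

  # assumption: only title and archive need to be indexed;
  # every attribute used in a where clause must appear here
  def search_data
    {
      title: title,
      archive: archive
    }
  end
end
After adding search_data, the index needs to be rebuilt with Post.reindex so the where filter can see the new attribute.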

OData V4: How to sum individual $count values

I have the following OData V4 query:
.../odata/Locations/?$select=Id&$filter=(Id eq 9bb29421-5160-4546-b87f-a78c0074f5c5 or Id eq 8b2a9727-a642-446e-b992-76c6d1584989)&$expand=Assets($select=Id;$filter=ParentAssetId eq null;$expand=Jobs($select=Id;$count=true;$top=0))
with the following result, as expected:
{
"#odata.context": ".../odata/$metadata#Locations(Id,Assets(Id,Jobs(Id)))",
"value": [
{
"Id": "8b2a9727-a642-446e-b992-76c6d1584989",
"Assets": [
{
"Id": "540d0855-aa1c-4e94-9d0b-332c99ec00b6",
"Jobs#odata.count": 2,
"Jobs": []
},
{
"Id": "6dcaa0e8-fc31-4d86-9f1a-a64300f8815c",
"Jobs#odata.count": 1,
"Jobs": []
},
{
"Id": "db4cf86b-9f42-4a99-b7b4-a64300f8c740",
"Jobs#odata.count": 1,
"Jobs": []
}
]
},
{
"Id": "9bb29421-5160-4546-b87f-a78c0074f5c5",
"Assets": [
{
"Id": "08a2a046-86c1-41a9-b2b1-a7ac007bed64",
"Jobs#odata.count": 1,
"Jobs": []
},
{
"Id": "2b76dad1-4058-4261-8a40-a7af00cb8bd5",
"Jobs#odata.count": 1,
"Jobs": []
},
{
"Id": "3a5472a1-68c4-4dc4-b2cd-a797007b0068",
"Jobs#odata.count": 2,
"Jobs": []
}
]
}
]
}
How can I sum the Jobs#odata.count values and return a single value: 8?
I have tried something like &$apply=aggregate(Assets/Jobs#odata.count with sum as Total), but the syntax is incorrect :(
I fixed this by changing the query perspective and counting the Jobs from the beginning:
.../odata/Jobs/$count/?$expand=Asset($select=LocationId)&$filter=Asset/LocationId in (9bb29421-5160-4546-b87f-a78c0074f5c5, 8b2a9727-a642-446e-b992-76c6d1584989)
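If the service supports the OData aggregation extension, the same total could presumably also be computed with $apply over the filtered Jobs. A sketch under that assumption, reusing the Asset/LocationId navigation from above:
.../odata/Jobs?$apply=filter(Asset/LocationId in (9bb29421-5160-4546-b87f-a78c0074f5c5, 8b2a9727-a642-446e-b992-76c6d1584989))/aggregate($count as Total)
This would return a single Total property instead of a plain $count value.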

How to normalize this recursive nested JSON

I'm having a bit of an issue trying to normalize my payload, which contains a nested schema of the same type as the parent, using Normalizr.
For example
{
id: 123,
sections:{
section: [{
id: 1,
name: "test",
sections: {
section: {
id: 125,
name: "test125"
}
}
}, {
id: 2,
name: "test2"
sections: {
section: [
{
id: 124,
name: "test124"
}
]
}
}, {
id: 3,
name: "test3"
}]
}
}
In the above JSON structure, the nested section may be an object or an array.
I had a similar issue with nested comments. Here's how I solved it:
export const comment = new schema.Entity("comment", {}, {
idAttribute: "key"
})
comment.define({
user: user,
reactions: {
data: [reaction]
},
children: [comment]
})
export const post = new schema.Entity("post", {
user: user,
files: [image],
comments: {
data: [comment]
},
reactions: {
data: [reaction]
}
}, {
idAttribute: "key"
})
Basically, I define comment as a new entity before I use it in its own entity definition. The other schemas I define as usual, in the constructor (see the post schema for an example). Hope this helps.
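Applied to the payload in the question, the same self-referencing pattern might look roughly like this. This is a sketch, not tested against your data; the processStrategy is only there because the nested sections.section is sometimes a single object and sometimes an array:
import { schema, normalize } from "normalizr";

// coerce sections.section to an array so both shapes normalize the same way
const coerceSections = value =>
  value.sections
    ? { ...value, sections: { section: [].concat(value.sections.section) } }
    : value;

export const section = new schema.Entity("sections", {}, {
  processStrategy: coerceSections
});

// define the self-reference after the entity exists, just like comment above
section.define({
  sections: { section: [section] }
});

// the top-level payload has the same shape, so it can be normalized as a section:
// const { result, entities } = normalize(payload, section);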
Here is a jq filter which will normalize the data:
def enumerate:
if type=="array" then .[] else . end ;
def sectionids:
[ .sections.section | enumerate | .id // empty | tostring ]
| if .==[] then {} else {sections:.} end ;
def sections:
{sections:{section:[.]}}
| .. | .section? | enumerate | objects | del(.sections) + sectionids ;
{
"result": .id,
"entities": {
"sections": (reduce sections as $s ({};.["\($s.id)"]=$s))
}
}
Sample run (assumes valid JSON data in data.json and the above filter in filter.jq):
$ jq -M -f filter.jq data.json
{
"result": 123,
"entities": {
"sections": {
"123": {
"id": 123,
"sections": [
"1",
"2",
"3"
]
},
"1": {
"id": 1,
"name": "test",
"sections": [
"125"
]
},
"2": {
"id": 2,
"name": "test2",
"sections": [
"124"
]
},
"3": {
"id": 3,
"name": "test3"
},
"125": {
"id": 125,
"name": "test125"
},
"124": {
"id": 124,
"name": "test124"
}
}
}
}
Try it online!

Elasticsearch Facet List doesn't Match Results

Problem
When I filter by a particular facet, that field's facets are correctly filtered in the result, but the other facet fields remain the same. The best way to explain this is with the query and the response.
Query
{
query: {
match_all: {}
},
filter: {
and: [{
term: {
"address.state": "oregon"
}
}]
},
facets: {
"address.city": {
terms: {
field: "address.city"
},
facet_filter: {}
},
"address.state": {
terms: {
field: "address.state"
},
facet_filter: {
and: [{
term: {
"address.state": "oregon"
}
}]
}
},
"address.country": {
terms: {
field: "address.country"
},
facet_filter: {}
}
}
}
Result
{
"took": 2,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"failed": 0
},
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "races",
"_type": "race",
"_id": "6",
"_score": 1,
"_source": {
"id": 6,
"name": "Eugene Marathon",
"description": "...",
"created_at": "2015-05-24T19:41:45.043Z",
"updated_at": "2015-05-24T19:41:45.046Z",
"address": {
"race_id": 6,
"id": 7,
"line1": null,
"line2": null,
"city": "Eugene",
"state": "oregon",
"country": "united_states",
"zip": null,
"user_id": null,
"created_at": "2015-05-24T19:41:45.044Z",
"updated_at": "2015-05-24T19:41:45.044Z"
},
"race_years": []
}
}
]
},
"facets": {
"address.city": {
"_type": "terms",
"missing": 0,
"total": 7,
"other": 0,
"terms": [
{
"term": "long beach",
"count": 1
},
{
"term": "lincoln",
"count": 1
},
{
"term": "las vegas",
"count": 1
},
{
"term": "jackson",
"count": 1
},
{
"term": "eugene",
"count": 1
},
{
"term": "duluth",
"count": 1
},
{
"term": "denver",
"count": 1
}
]
},
"address.state": {
"_type": "terms",
"missing": 0,
"total": 1,
"other": 0,
"terms": [
{
"term": "oregon",
"count": 1
}
]
},
"address.country": {
"_type": "terms",
"missing": 0,
"total": 7,
"other": 0,
"terms": [
{
"term": "united_states",
"count": 7
}
]
}
}
}
As you can see, it returns all of the address.city facets even though the only hit is located in Eugene. It also returns a count of 7 for united_states. Why is it returning all of these extra facets, and with incorrect counts? My Ruby mapping is below.
Ruby Mapping
settings index: {
number_of_shards: 1,
analysis: {
analyzer: {
facet_analyzer: {
type: 'custom',
tokenizer: 'keyword',
filter: ['lowercase', 'trim']
}
}
}
} do
mapping do
indexes :name, type: 'string', analyzer: 'english', boost: 10
indexes :description, type: 'string', analyzer: 'english'
indexes :address do
indexes :city, type: 'string', analyzer: 'facet_analyzer'
indexes :state, type: 'string'
indexes :country, type: 'string'
end
end
end
This is the normal behavior of facets when run against a filter. From the official documentation:
There's one important distinction to keep in mind. While search queries restrict both the returned documents and facet counts, search filters restrict only returned documents, but not facet counts.
In your case, your query matches all documents (i.e. match_all), so the facet counts are computed against all documents, too.
Change your query to this and your facet counts will change (in this case you don't need the facet_filter anymore):
{
query: {
term: {
"address.state": "oregon"
}
},
facets: {
"address.city": {
terms: {
field: "address.city"
}
},
"address.state": {
terms: {
field: "address.state"
}
},
"address.country": {
terms: {
field: "address.country"
}
}
}
}
Another thing worth noting is that facets are deprecated and have been replaced by the much more powerful aggregations.
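For reference, a roughly equivalent request using aggregations might look like this (a sketch, assuming the same field names as your mapping; on current Elasticsearch versions the facet fields would also need to be keyword fields or have fielddata enabled):
{
  query: {
    term: {
      "address.state": "oregon"
    }
  },
  aggs: {
    "address.city": {
      terms: { field: "address.city" }
    },
    "address.state": {
      terms: { field: "address.state" }
    },
    "address.country": {
      terms: { field: "address.country" }
    }
  }
}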

Elastic Search- Searching Multiple Queries in Single Field

I'm new to Elasticsearch. I have a field named clearance in my users table and I'm trying to filter my results based on it.
match: {
clearance: {
query: 'None',
type: 'phrase'
}
}
When I run the above match query, I get 3 results. What I'm trying to do is pass one more string along with None. For example, I want to find the users with clearance None and First Level.
I tried this.
multi_match: {
clearance: {
query: 'None OR First Level',
type: 'phrase'
}
}
But that ended up in an error. Please help, and correct me if my question is wrong.
One way would be to make clearance a not_analyzed field in the mapping and use a terms filter.
Example:
PUT test
{
"mappings": {
"e1":{
"properties": {
"clearance":{
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
Some test data:
PUT test/e1/1
{
"clearance":"None"
}
PUT test/e1/2
{
"clearance":"First Level"
}
PUT test/e1/3
{
"clearance":"Second Level"
}
Now the query part:
GET test/e1/_search
{
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"terms": {
"clearance": [
"None",
"First Level"
],
"execution": "or"
}
}
}
}
}
Result verification:
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 1,
"hits": [
{
"_index": "test",
"_type": "e1",
"_id": "1",
"_score": 1,
"_source": {
"clearance": "None"
}
},
{
"_index": "test",
"_type": "e1",
"_id": "2",
"_score": 1,
"_source": {
"clearance": "First Level"
}
}
]
}
}
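Note that the filtered query used above was removed in Elasticsearch 5. On newer versions the same idea would roughly be a bool query with a terms clause in its filter context (a sketch; the not_analyzed string mapping would likewise become a keyword field):
GET test/e1/_search
{
  "query": {
    "bool": {
      "filter": {
        "terms": {
          "clearance": [
            "None",
            "First Level"
          ]
        }
      }
    }
  }
}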
