How to add counter to set via libnftables-json? - netfilter

Since nftables v0.9.5, named sets support a per-element counter specification. I can use it from the nft CLI or in a configuration file like this:
set my_flows_meter {
type ipv4_addr . ipv4_addr . inet_service . inet_service . inet_proto
flags dynamic, timeout
counter
}
But when I try to add a named set with the counter specification via libnftables-json, I can't find anything about it in the man pages.
I tried:
{"add": {
"set": {
"family": "ip",
"name": "my_flows_meter",
"table": "filter",
"type": [
"ipv4_addr",
"ipv4_addr",
"inet_service",
"inet_service",
"inet_proto"
],
"handle": 2,
"size": 65535,
"flags": [
"timeout"
],
"counter": null
}
}
}
But that doesn't work. libnftables-json appears to simply ignore the "counter" key, because the set is added without it.

In nftables v1.0.5 (Lester Gooch #4), nft -j list set ... ... ... does seem to print the counters as well. But it prints them for each element, and that is probably what you need to do too.
{
"nftables" : [
{
"metainfo" : {
"json_schema_version" : 1,
"release_name" : "Lester Gooch #4",
"version" : "1.0.5"
}
},
{
"set" : {
"elem" : [
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
67
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
25
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
110
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
220
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
995
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
873
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
115
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 170088,
"packets" : 2585
},
"val" : {
"concat" : [
"udp",
53
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
22
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
5900
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"udp",
3389
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 11418,
"packets" : 6
},
"val" : {
"concat" : [
"udp",
{
"range" : [
1714,
1764
]
}
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
4713
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
{
"range" : [
1714,
1764
]
}
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
143
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 240,
"packets" : 4
},
"val" : {
"concat" : [
"tcp",
993
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 180,
"packets" : 3
},
"val" : {
"concat" : [
"tcp",
25
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
110
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
220
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
995
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
873
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
115
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 540,
"packets" : 9
},
"val" : {
"concat" : [
"tcp",
53
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
22
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
5900
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
3389
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"sctp",
22
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
10240
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 14200,
"packets" : 71
},
"val" : {
"concat" : [
"udp",
5678
]
}
}
},
{
"elem" : {
"counter" : {
"bytes" : 0,
"packets" : 0
},
"val" : {
"concat" : [
"tcp",
8291
]
}
}
}
],
"family" : "inet",
"flags" : [
"interval"
],
"handle" : 25,
"name" : "protocols",
"table" : "filter",
"type" : [
"inet_proto",
"inet_service"
]
}
}
]
}
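Based on that output, the counter apparently has to be attached per element rather than to the set itself in JSON. The following is only a sketch and is untested: it assumes the add element command accepts the same "counter" object that nft -j list set emits (the element values are made up to match the my_flows_meter type from the question):
{"nftables": [
  {"add": {"element": {
    "family": "ip",
    "table": "filter",
    "name": "my_flows_meter",
    "elem": [
      {"elem": {
        "val": {"concat": ["192.0.2.1", "198.51.100.1", 40000, 443, "tcp"]},
        "counter": {"packets": 0, "bytes": 0}
      }}
    ]
  }}}
]}
A practical way to confirm the exact JSON your libnftables version accepts is to create the set and an element with the text syntax shown above, then dump it with nft -j list set ip filter my_flows_meter; the printed JSON should round-trip back through nft -j -f.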

Related

how to create a loop to find available values until it shows up true

I am working on a timeslot system that returns both available and unavailable time slots; I filter out the available slots and load them into an array. Some days have no available timeslots at all. I want to loop through the response and find the next available slots on the nearest day that has any, using the "available" Bool parameter from the response below (I pasted only part of the response).
{
"timeslots" : [
{
"slots" : [
{
"day" : "tuesday",
"selected" : false,
"pk" : 160,
"available" : false,
"timeframe" : "10:00 - 12:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 161,
"available" : true,
"timeframe" : "12:00 - 14:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 162,
"available" : true,
"timeframe" : "15:00 - 17:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 163,
"available" : true,
"timeframe" : "17:00 - 19:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 164,
"available" : false,
"timeframe" : "19:00 - 21:00"
}
],
"day_name" : "tuesday",
"date" : "2022-04-26",
"available" : 3,
"day_abbr" : "26 Apr"
},
{
"slots" : [
{
"pk" : 160,
"day" : "wednesday",
"timeframe" : "10:00 - 12:00",
"selected" : false,
"available" : true
},
{
"pk" : 161,
"day" : "wednesday",
"timeframe" : "12:00 - 14:00",
"selected" : false,
"available" : true
},
{
"pk" : 162,
"day" : "wednesday",
"timeframe" : "15:00 - 17:00",
"selected" : false,
"available" : true
},
{
"pk" : 163,
"day" : "wednesday",
"timeframe" : "17:00 - 19:00",
"selected" : false,
"available" : true
},
{
"pk" : 164,
"day" : "wednesday",
"timeframe" : "19:00 - 21:00",
"selected" : false,
"available" : false
}
],
"day_name" : "wednesday",
"date" : "2022-04-27",
"available" : 4,
"day_abbr" : "27 Apr"
},
I did an iteration like this so far.
switch response.result {
case let .success(value):
let json = JSON(value)
print("Time slot response: \(json)")
self.slots.removeAll()
self.daySlots.removeAll()
self.typeOfSlot = slotType
if let slotArray = json[slotType][self.dayIndex ?? 0]["slots"].array{
if !slotArray.isEmpty || slotArray != [] {
for slotJSON in slotArray {
let slot = Slots.parseSlots(slotJSON: slotJSON)
self.slots.append(slot)
}
self.slots = self.slots.filter({ $0.available ?? false })
//
if self.slots.count != 0{
switch self.typeOfSlot{
case "seasonal_timeslots":
self.seasonalTimeSlotPK = self.slots.first?.pk
print("time slot pk \(self.seasonalTimeSlotPK ?? 0)")
UserDefaults.standard.set(self.seasonalTimeSlotPK, forKey:uds.kSeasonalTimeslotPK)
self.selectedSeasonalTimeRange = self.slots.first?.timeFrame
UserDefaults.standard.set(self.selectedSeasonalTimeRange, forKey:uds.kSeasonalTimeRange)
case "timeslots":
self.timeSlotPK = self.slots.first?.pk
print("time slot pk \(self.timeSlotPK ?? 0)")
UserDefaults.standard.set(self.timeSlotPK ?? 0, forKey:uds.kTimeslotPK)
self.selectedTimeRange = self.slots.first?.timeFrame
UserDefaults.standard.set(self.selectedTimeRange ?? "", forKey:uds.kTimeRange)
default:
break
}
}
// timeframe
}
else {
// self.tableView.setEmptyView(title: "No available slots found for current date!", message: "Please tap on other days for next available slots.")
self.view.makeToast("No Slots Found!", duration: 3.0, position: .bottom)
}
Please give me a code example that searches for available slots and stops as soon as one is found.
Instead of using the raw JSON data, you could restructure your code and use specific models to do what you ask. Here is some example test code to show how to get the slots.
struct Response: Identifiable, Codable {
let id = UUID()
var timeslots: [Timeslot]
}
struct Timeslot: Codable {
let slots: [Slot]
let dayName: String
let date: String
let available: Int
let dayAbbr: String
enum CodingKeys: String, CodingKey {
case slots
case dayName = "day_name"
case date, available
case dayAbbr = "day_abbr"
}
}
struct Slot: Identifiable, Codable {
let id = UUID()
let day: String
let selected: Bool
let pk: Int
let available: Bool
let timeframe: String
}
struct ContentView: View {
var body: some View {
Text("testing")
.onAppear {
let json = """
{
"timeslots" : [
{
"slots" : [
{
"day" : "tuesday",
"selected" : false,
"pk" : 160,
"available" : false,
"timeframe" : "10:00 - 12:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 161,
"available" : true,
"timeframe" : "12:00 - 14:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 162,
"available" : true,
"timeframe" : "15:00 - 17:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 163,
"available" : true,
"timeframe" : "17:00 - 19:00"
},
{
"day" : "tuesday",
"selected" : false,
"pk" : 164,
"available" : false,
"timeframe" : "19:00 - 21:00"
}
],
"day_name" : "tuesday",
"date" : "2022-04-26",
"available" : 3,
"day_abbr" : "26 Apr"
},
{
"slots" : [
{
"pk" : 160,
"day" : "wednesday",
"timeframe" : "10:00 - 12:00",
"selected" : false,
"available" : true
},
{
"pk" : 161,
"day" : "wednesday",
"timeframe" : "12:00 - 14:00",
"selected" : false,
"available" : true
},
{
"pk" : 162,
"day" : "wednesday",
"timeframe" : "15:00 - 17:00",
"selected" : false,
"available" : true
},
{
"pk" : 163,
"day" : "wednesday",
"timeframe" : "17:00 - 19:00",
"selected" : false,
"available" : true
},
{
"pk" : 164,
"day" : "wednesday",
"timeframe" : "19:00 - 21:00",
"selected" : false,
"available" : false
}
],
"day_name" : "wednesday",
"date" : "2022-04-27",
"available" : 4,
"day_abbr" : "27 Apr"
}
]
}
"""
let data = json.data(using: .utf8)!
do {
let response = try JSONDecoder().decode(Response.self, from: data)
// print("\n--> response: \(response) \n")
// here loop over the timeslots
for tmslot in response.timeslots {
// here loop over the slots
for slot in tmslot.slots {
if slot.available {
print("slot available: \(slot.day) \(slot.timeframe)")
}
}
}
} catch {
print("error: \(error)")
}
}
}
}
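If the goal is to stop as soon as the first available slot is found rather than printing all of them, the same models support an early exit with first(where:). A minimal sketch building on the Response/Timeslot/Slot types above (names reused from that example):
// Returns the first available slot across all days, or nil if there is none.
func firstAvailableSlot(in response: Response) -> (day: String, slot: Slot)? {
    for timeslot in response.timeslots {
        // first(where:) stops scanning this day as soon as a match is found
        if let slot = timeslot.slots.first(where: { $0.available }) {
            return (timeslot.dayName, slot)
        }
    }
    return nil
}
// Usage, after decoding `response` as in the example above:
// if let hit = firstAvailableSlot(in: response) {
//     print("first free slot: \(hit.day) \(hit.slot.timeframe) (pk \(hit.slot.pk))")
// }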
I'm not really sure what goes on in your code, but you are asking how to find the first available slot in your array of slots?
I made some structs to keep track of the data and used Swift 4's Decodable to represent the data more easily:
struct Timeslots: Decodable {
    let slots: [Slot]
    var firstFreeTime: Slot? {
        slots.first(where: { $0.available })
    }
}
struct Slot: Decodable {
    let day: String
    let selected: Bool
    let pk: Int
    let available: Bool
    let timeframe: String // lowercase to match the "timeframe" key in the JSON
}

how to json list after converting into json

I am trying this but getting an error. What's another way to sort this list of items?
The error I get is: Value of tuple type '(String, JSON)' has no member 'subscript'
let jsonData = try JSONSerialization.data(withJSONObject: bizSnapshot.value as Any, options: .prettyPrinted)
var bizOfferingsJson = try JSON(data: jsonData)
Each item in bizOfferingsJson contains an itemIndex key (an Int), and I want to sort based on that itemIndex:
bizOfferingsJson = bizOfferingsJson.sorted(by: { $0.["itemIndex"] > $1.["itemIndex"] })
print("bizOfferingsJson===",bizOfferingsJson)
sample bizOfferingsJson data
[
{
"isVisible" : true,
"itemRetailInfo" : {
"units" : "GRAMS",
"quantityPrice" : [
{
"discountPrice" : 0,
"regularPrice" : 25,
"quantity" : 1000,
"quantitySelected" : 1000,
"isEditable" : false
}
]
},
"imageStoragePathList" : [
"ONION_RED.png"
],
"bizOfferingsID" : "Vegetables_0",
"masterImage" : 0,
"vegNonVegInfoEnum" : "NONE",
"description" : "",
"itemQunatitySelected" : 0,
"bizOfferingsHeaderTypeThree" : {
"indexOrder" : 0,
"headerText" : "Vegetables",
"isVisible" : true
},
"itemIndex" : 0,
"totalCountQunatity" : 0,
"primaryText" : "Onion Red"
},
{
"isVisible" : true,
"itemRetailInfo" : {
"units" : "GRAMS",
"quantityPrice" : [
{
"discountPrice" : 0,
"regularPrice" : 25,
"quantity" : 1000,
"quantitySelected" : 1000,
"isEditable" : false
}
]
},
"imageStoragePathList" : [
"POTATO.png"
],
"bizOfferingsID" : "Vegetables_1",
"masterImage" : 0,
"vegNonVegInfoEnum" : "NONE",
"description" : "",
"itemQunatitySelected" : 0,
"bizOfferingsHeaderTypeThree" : {
"indexOrder" : 0,
"headerText" : "Vegetables",
"isVisible" : true
},
"itemIndex" : 1,
"totalCountQunatity" : 0,
"primaryText" : "Potato"
},
{
"isVisible" : true,
"itemRetailInfo" : {
"units" : "PIECES",
"quantityPrice" : [
{
"discountPrice" : 0,
"regularPrice" : 3,
"quantity" : 1,
"quantitySelected" : 0.10000000000000001,
"isEditable" : false
}
]
},
"imageStoragePathList" : [
"CORIANDER_LEAVES_BUNCH.png"
],
"bizOfferingsID" : "Vegetables_2",
"masterImage" : 0,
"vegNonVegInfoEnum" : "NONE",
"description" : "",
"itemQunatitySelected" : 0,
"bizOfferingsHeaderTypeThree" : {
"indexOrder" : 0,
"headerText" : "Vegetables",
"isVisible" : true
},
"itemIndex" : 2,
"totalCountQunatity" : 0,
"primaryText" : "Coriander Leaves Bunch"
},
{
"isVisible" : true,
"itemRetailInfo" : {
"units" : "GRAMS",
"quantityPrice" : [
{
"discountPrice" : 0,
"regularPrice" : 8,
"quantity" : 500,
"quantitySelected" : 500,
"isEditable" : false
}
]
},
"imageStoragePathList" : [
"TOMATO_STANDARD.png"
],
"bizOfferingsID" : "Vegetables_3",
"masterImage" : 0,
"vegNonVegInfoEnum" : "NONE",
"description" : "",
"itemQunatitySelected" : 0,
"bizOfferingsHeaderTypeThree" : {
"indexOrder" : 0,
"headerText" : "Vegetables",
"isVisible" : true
},
"itemIndex" : 3,
"totalCountQunatity" : 0,
"primaryText" : "Tomato Standard"
}
]
You need to clarify which type you are sorting. Assuming an array:
var bizOfferingsJson = try JSON(data: jsonData).array!
bizOfferingsJson = bizOfferingsJson.sorted(by: { $0["itemIndex"] > $1["itemIndex"] })
or, using the mutating sort:
bizOfferingsJson.sort{ $0["itemIndex"] > $1["itemIndex"] }
Using Codable is much better given your JSON:
struct Root: Codable {
let isVisible: Bool
let itemRetailInfo: ItemRetailInfo
let imageStoragePathList: [String]
let bizOfferingsID: String
let masterImage: Int
let vegNonVegInfoEnum, purpleDescription: String
let itemQunatitySelected: Int
let bizOfferingsHeaderTypeThree: BizOfferingsHeaderTypeThree
let itemIndex, totalCountQunatity: Int
let primaryText: String
enum CodingKeys: String, CodingKey {
case isVisible, itemRetailInfo, imageStoragePathList, bizOfferingsID, masterImage, vegNonVegInfoEnum
case purpleDescription = "description"
case itemQunatitySelected, bizOfferingsHeaderTypeThree, itemIndex, totalCountQunatity, primaryText
}
}
// MARK: - BizOfferingsHeaderTypeThree
struct BizOfferingsHeaderTypeThree: Codable {
let indexOrder: Int
let headerText: String
let isVisible: Bool
}
// MARK: - ItemRetailInfo
struct ItemRetailInfo: Codable {
let units: String
let quantityPrice: [QuantityPrice]
}
// MARK: - QuantityPrice
struct QuantityPrice: Codable {
let discountPrice, regularPrice, quantity: Int
let quantitySelected: Double
let isEditable: Bool
}
var res = try! JSONDecoder().decode([Root].self, from: jsonData)
res.sort { $0.itemIndex > $1.itemIndex }
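If the sorted result then needs to go back to JSON data (for example to cache it or pass it on), JSONEncoder closes the loop. A minimal sketch, reusing res from above:
let encoder = JSONEncoder()
encoder.outputFormatting = [.prettyPrinted]
if let sortedData = try? encoder.encode(res),
   let sortedString = String(data: sortedData, encoding: .utf8) {
    print(sortedString) // items now ordered by itemIndex, descending
}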

NSMutableDictionary addEntriesFromDictionary not merging dictionaries properly

I have a JSON response that I store as an NSMutableDictionary that looks like this:
{
"list": { "ID1", "ID2", "ID3" },
"items": {
"ID1" : { "name" : "shoe" },
"ID2" : { "name" : "pants" },
"ID3" : { "name" : "hat" }
}
}
i need to have the NSMutableDictionary add entries from any additional JSON responses, so if i receive a new response as follows:
{
"list": { "ID4", "ID5", "ID6" },
"items": {
"ID4" : { "name" : "shirt" },
"ID5" : { "name" : "tie" },
"ID6" : { "name" : "glasses" }
}
}
the updated NSMutableDictionary needs to appear as follows:
{
"list": { "ID1", "ID2", "ID3", "ID4", "ID5", "ID6" },
"items": {
"ID1" : { "name" : "shoe" },
"ID2" : { "name" : "pants" },
"ID3" : { "name" : "hat" },
"ID4" : { "name" : "shirt" },
"ID5" : { "name" : "tie" },
"ID6" : { "name" : "glasses" }
}
}
Unfortunately, when I call addEntriesFromDictionary: with the additions, I get this:
{
"list": { "ID1", "ID2", "ID3" },
"items": {
"ID1" : { "name" : "shoe" },
"ID2" : { "name" : "pants" },
"ID3" : { "name" : "hat" }
}
}
"list": { "ID4", "ID5", "ID6" },
"items": {
"ID4" : { "name" : "shirt" },
"ID5" : { "name" : "tie" },
"ID6" : { "name" : "glasses" }
}
}
Assuming we have the same dictionaries as in your example:
let key1 = "list"
let key2 = "items"
var rec = [key1:["ID1","ID2","ID3"],
key2:["ID1":["name":"shoe"],"ID2":["name":"pants"],"ID3":["name":"hat"]]] as [String : Any]
let inc = [key1:["ID4","ID5","ID6"],
key2:["ID4":["name":"shirt"],"ID5":["name":"tie"],"ID6":["name":"glasses"]]] as [String : Any]
I used the following rationale to find a solution:
... implemented this code snippet hereafter:
func merge(_ inc: [String: Any], into rec: inout [String: Any]) -> [String: Any] {
    for (_, vals) in inc {
        // Pull out the existing ID list and items, plus the incoming items.
        if var recKeys = rec[key1] as? [String],
           var recItems = rec[key2] as? [String: [String: String]],
           let incItems = inc[key2] as? [String: [String: String]] {
            // Only the "list" entry carries an array of IDs; use it to drive the merge.
            if let incValIds = vals as? [String] {
                for id in incValIds {
                    if let newVal = incItems[id] {
                        if recKeys.contains(id) {
                            // Known ID: merge the incoming fields into the existing item.
                            for (newValId, newValObj) in newVal {
                                guard var tab = recItems[id] else { continue }
                                tab[newValId] = newValObj
                                recItems[id] = tab
                            }
                        } else {
                            // New ID: append it to the list and add its item.
                            recKeys.append(id)
                            recItems[id] = newVal
                        }
                    }
                }
            }
            // Write the merged list and items back into the receiving dictionary.
            rec[key1] = recKeys
            rec[key2] = recItems
        }
    }
    return rec
}
... and used this function as described hereunder to get the result defined below:
let updatedInfo = merge(inc, into: &rec)
print(updatedInfo)
You can now properly merge the two provided dictionaries as desired.
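As an alternative to the hand-rolled loop, the same merge can be expressed with the standard library: Dictionary.merge(_:uniquingKeysWith:) for the "items" part and a de-duplicated append for the "list" part. This is a separate sketch (not the code above) and assumes the same shapes: a [String] under key1 and a [String: [String: String]] under key2.
func mergeResponses(_ inc: [String: Any], into rec: inout [String: Any]) {
    // Append new IDs to the "list" array, skipping duplicates.
    var ids = rec[key1] as? [String] ?? []
    let newIds = inc[key1] as? [String] ?? []
    ids.append(contentsOf: newIds.filter { !ids.contains($0) })
    rec[key1] = ids

    // Merge the "items" dictionaries; on a key clash, keep the incoming value.
    var items = rec[key2] as? [String: [String: String]] ?? [:]
    let newItems = inc[key2] as? [String: [String: String]] ?? [:]
    items.merge(newItems) { _, incoming in incoming }
    rec[key2] = items
}
// Usage: mergeResponses(inc, into: &rec)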

Searchkick Aggregations behavior

I am working with the Searchkick Gem and Elastic search and am trying to understand the aggregations behavior.
I have three facets (Aggregations): City, State and Company.
If I filter by any one of them, the counts of other two are reduced to reflect the total in the result set. But the selected facet comes back with all values. So say I had 100 items in the index, and I filtered by a Company that had 2 total items in the index, the City and State counts are updated to reflect no more than 2. But the Company count remains at 100.
Example (filtered to City=Atlanta)
{
"query": {
"function_score": {
"functions": [
{
"filter": {
"and": [
{
"term": {
"featured": true
}
}
]
},
"boost_factor": 1000
}
],
"query": {
"match_all": {}
},
"score_mode": "sum"
}
},
"size": 20,
"from": 0,
"post_filter": {
"bool": {
"filter": [
{
"range": {
"expiration_date": {
"from": "2016-08-18T23:07:15.670-04:00",
"include_lower": true
}
}
},
{
"range": {
"created_at": {
"to": "2016-08-18T23:07:15.670-04:00",
"include_upper": true
}
}
},
{
"term": {
"published": true
}
},
{
"term": {
"tenant_id": 4
}
},
{
"term": {
"city": "Atlanta"
}
}
]
}
},
"aggs": {
"company": {
"filter": {
"bool": {
"must": [
{
"range": {
"expiration_date": {
"from": "2016-08-18T23:07:15.670-04:00",
"include_lower": true
}
}
},
{
"range": {
"created_at": {
"to": "2016-08-18T23:07:15.670-04:00",
"include_upper": true
}
}
},
{
"term": {
"published": true
}
},
{
"term": {
"tenant_id": 4
}
},
{
"term": {
"city": "Atlanta"
}
}
]
}
},
"aggs": {
"company": {
"terms": {
"field": "company",
"size": 10
}
}
}
},
"city": {
"filter": {
"bool": {
"must": [
{
"range": {
"expiration_date": {
"from": "2016-08-18T23:07:15.670-04:00",
"include_lower": true
}
}
},
{
"range": {
"created_at": {
"to": "2016-08-18T23:07:15.670-04:00",
"include_upper": true
}
}
},
{
"term": {
"published": true
}
},
{
"term": {
"tenant_id": 4
}
}
]
}
},
"aggs": {
"city": {
"terms": {
"field": "city",
"size": 10
}
}
}
},
"state": {
"filter": {
"bool": {
"must": [
{
"range": {
"expiration_date": {
"from": "2016-08-18T23:07:15.670-04:00",
"include_lower": true
}
}
},
{
"range": {
"created_at": {
"to": "2016-08-18T23:07:15.670-04:00",
"include_upper": true
}
}
},
{
"term": {
"published": true
}
},
{
"term": {
"tenant_id": 4
}
},
{
"term": {
"city": "Atlanta"
}
}
]
}
},
"aggs": {
"state": {
"terms": {
"field": "state",
"size": 10
}
}
}
}
},
"fields": []
}
Result (2 results returned, but the City aggregation reports 58 docs). Note that Company and State return the correct # of aggregations:
{
"took": 114,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 1,
"hits": [
{
"_index": "jobs_development_20160818140128648",
"_type": "job",
"_id": "457134",
"_score": 1
},
{
"_index": "jobs_development_20160818140128648",
"_type": "job",
"_id": "457137",
"_score": 1
}
]
},
"aggregations": {
"city": {
"doc_count": 58,
"city": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 19,
"buckets": [
{
"key": "Los Angeles",
"doc_count": 8
},
{
"key": "London",
"doc_count": 7
},
{
"key": "New York",
"doc_count": 7
},
{
"key": "Burbank",
"doc_count": 5
},
{
"key": "Pasig",
"doc_count": 3
},
{
"key": "Atlanta",
"doc_count": 2
},
{
"key": "Chicago",
"doc_count": 2
},
{
"key": "Culver City",
"doc_count": 2
},
{
"key": "London Borough of Hackney",
"doc_count": 2
},
{
"key": "Birmingham",
"doc_count": 1
}
]
}
},
"company": {
"doc_count": 2,
"company": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Second Story",
"doc_count": 2
}
]
}
},
"state": {
"doc_count": 2,
"state": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Georgia",
"doc_count": 2
}
]
}
}
}
}
What am I missing? Is this correct behavior?
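Looking at the generated query, the counts follow directly from how the aggregation filters are built: the company and state aggregations repeat the {"term": {"city": "Atlanta"}} clause, while the city aggregation's own filter leaves it out, so the selected facet keeps counting documents across all cities. That is the usual faceted-search behavior, letting the user switch to another city without the other options disappearing. If you wanted the selected facet to shrink as well, its filter would need the same term. A trimmed sketch (date-range clauses omitted), edited from the query above rather than taken from Searchkick output:
"city": {
  "filter": {
    "bool": {
      "must": [
        { "term": { "published": true } },
        { "term": { "tenant_id": 4 } },
        { "term": { "city": "Atlanta" } }
      ]
    }
  },
  "aggs": {
    "city": { "terms": { "field": "city", "size": 10 } }
  }
}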

Elasticsearch - using the path hierarchy tokenizer to access different level of categories

I'm very new to Elasticsearch and have a question about the path hierarchy tokenizer. Here is my code example:
My mapping code:
PUT /my_index
{
"settings": {
"analysis": {
"analyzer": {
"path-analyzer": {
"type": "custom",
"tokenizer": "path-tokenizer"
}
},
"tokenizer": {
"path-tokenizer": {
"type": "path_hierarchy",
"delimiter": "."
}
}
}
},
"mappings": {
"my_type": {
"dynamic": "strict",
"properties": {
"group_path": {
"type": "string",
"index_analyzer": "path-analyzer",
"search_analyzer": "keyword"
}
}
}
}
}
This is my PUT:
PUT /my_index/my_type/1
{
"group_path": ["Book.Thriller.Adult","DVD.Comedy.Kids"]
}
This is my Query:
GET /my_index/my_type/_search?search_type=count
{
"aggs": {
"category": {
"terms": {
"field": "group_path",
"size": 0
}
}
}
}
And the result:
{
...
"aggregations": {
"category": {
"buckets": [
{
"key": "Book",
"doc_count": 1
},
{
"key": "Book.Thriller",
"doc_count": 1
},
{
"key": "Book.Thriller.Adult",
"doc_count": 1
},
{
"key": "DVD",
"doc_count": 1
},
{
"key": "DVD.Comedy",
"doc_count": 1
},
{
"key": "DVD.Comedy.Kids",
"doc_count": 1
}
]
}
}
}
So far everything is good. What I'm looking for is how to create buckets for only the first category level, for example. How can I get a result like this:
{
...
"aggregations": {
"category": {
"buckets": [
{
"key": "Book",
"doc_count": 1
},
{
"key": "DVD",
"doc_count": 1
}
]
}
}
}
Thank you for any help.
The only way I found to do this is to use the exclude syntax to exclude the levels you don't want.
{
"aggs": {
"category": {
"terms": {
"field": "group_path",
"size": 0,
"exclude" : ".*\\..*"
}
}
}
}
Will then return
"aggregations": {
"category": {
"buckets": [
{
"key": "Book",
"doc_count": 1
},
{
"key": "DVD",
"doc_count": 1
}
]
}
}
If you select book, you can then search like this
{
"query" : {
"filtered": {
"filter": {
"prefix": {
"group_path": "Book"
}
}
}
},
"aggs" : {
"category": {
"terms": {
"field": "group_path",
"size": 0,
"include" : "Book\\..*",
"exclude": ".*\\..*\\..*"
}
}
}
}
Will then return
"aggregations": {
"category": {
"buckets": [
{
"key": "Book.Thriller",
"doc_count": 1
}
]
}
}
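The same include/exclude pattern extends one level at a time. For example, after the user picks Book.Thriller, the next level down could be requested like this (a sketch following the exact same regex approach as above):
{
  "query": {
    "filtered": {
      "filter": {
        "prefix": {
          "group_path": "Book.Thriller"
        }
      }
    }
  },
  "aggs": {
    "category": {
      "terms": {
        "field": "group_path",
        "size": 0,
        "include": "Book\\.Thriller\\..*",
        "exclude": ".*\\..*\\..*\\..*"
      }
    }
  }
}
With the sample document this returns a single bucket for Book.Thriller.Adult.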
