Elastic Search Filter Bucket Values - java

My use case is as follows: I need to find out all the unique colors that appeared in the last 1 year but disappeared in the last 3 months. My documents look like this
{
doc_id: 1,
color: "red",
timestamp: epoch time here
},
{
doc_id: 2,
color: "blue",
timestamp: epoch time here
}
So For example if any document with attribute color (from now referred to just as color) blue appeared in last year, but didn't appear in the last 3 months then we need to include blue in the result. On the other hand if documents with color red appeared in last year and also appeared in the last 3 months then we need to exclude red from the result.
The 1 year in the above example also includes the 3 months in it when computing. So if all the documents with Color blue happened only between May 2018 - Feb 2019, this means that documents with blue occurred in last year but went missing in last 3 months (March 2019 - May 2019), then blue should be in the result set. On the other hand if the documents with Color Red happened between May 2018 - Feb 2019 as well as March 2019 - May 2019, then we need to exclude this color red in the result set. I couldn't get this with terms query in Elastic search.

I have taken a range from "2019-01-01"- "2019-12-30", with excluded months as "2019-09-01"- "2019-12-30"
Mapping :
{
"testindex" : {
"mappings" : {
"properties" : {
"color" : {
"type" : "keyword"
},
"doc_id" : {
"type" : "long"
},
"timestamp" : {
"type" : "date"
}
}
}
}
}
Data:
"hits" : [
{
"_index" : "testindex",
"_type" : "_doc",
"_id" : "GPv0zWoB8AL5aj8D_wLG",
"_score" : 1.0,
"_source" : {
"doc_id" : 1,
"color" : "blue",
"timestamp" : "2019-03-30"
}
},
{
"_index" : "testindex",
"_type" : "_doc",
"_id" : "Gfv1zWoB8AL5aj8DJAKU",
"_score" : 1.0,
"_source" : {
"doc_id" : 1,
"color" : "red",
"timestamp" : "2019-12-30"
}
},
{
"_index" : "testindex",
"_type" : "_doc",
"_id" : "Gvv1zWoB8AL5aj8DOwKf",
"_score" : 1.0,
"_source" : {
"doc_id" : 1,
"color" : "red",
"timestamp" : "2019-01-01"
}
}
]
}
Final Query:
GET testindex/_search
{
"size": 0,
"query": {
"range": {
"timestamp": {
"gte": "2019-01-01",
"lte": "2019-12-30"
}
}
},
"aggs": {
"colors": {
"terms": {
"field": "color"
},
"aggs": {
"excluded_range": {
"date_range": {
"field": "timestamp",
"ranges": [
{
"from": "2019-09-01",
"to": "2019-12-31"
}
]
}
},
"excluded_docs_count": {
"sum_bucket": {
"buckets_path": "excluded_range>_count"
}
},
"myfinal": {
"bucket_selector": {
"buckets_path": {
"out_of_range_docs": "excluded_docs_count"
},
"script": {
"inline": "params.out_of_range_docs==0"
}
}
}
}
}
}
}

Related

Opensearch - get inner aggregations from aggregations using opensearch-java client

There is this opensearch query constructed using openserch-java
GET eventsearch/_search
{
"aggregations": {
"WEB": {
"aggregations": {
"eventDate": {
"date_histogram": {
"extended_bounds": {
"max": "2022-12-01T00:00:00Z",
"min": "2022-01-01T00:00:00Z"
},
"field": "eventDate",
"fixed_interval": "1d",
"min_doc_count": 0
}
}
},
"filter": {
"term": {
"channel": {
"value": "WEB",
"case_insensitive": true
}
}
}
}
},
"query": {
"bool": {
"filter": [
{
"range": {
"eventDate": {
"from": "2022-01-01T00:00:00Z",
"to": "2022-12-01T00:00:00Z"
}
}
}
],
"must": [
{
"match_all": {}
}
]
}
},
"size": 0
}
Running query, the response is this:
{
"took" : 2,
"timed_out" : false,
"_shards" : {
"total" : 1,
"successful" : 1,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : {
"value" : 26,
"relation" : "eq"
},
"max_score" : null,
"hits" : [ ]
},
"aggregations" : {
"WEB" : {
"doc_count" : 25,
"eventDate" : {
"buckets" : [
{
"key_as_string" : "2022-01-01T00:00:00.000Z",
"key" : 1640995200000,
"doc_count" : 0
},
{
"key_as_string" : "2022-01-02T00:00:00.000Z",
"key" : 1641081600000,
"doc_count" : 0
},
{
"key_as_string" : "2022-01-03T00:00:00.000Z",
"key" : 1641168000000,
"doc_count" : 0
},
{
"key_as_string" : "2022-01-04T00:00:00.000Z",
"key" : 1641254400000,
"doc_count" : 0
},
....................
]
}
}
}
}
In java I need to perform this query and get the results from there.
But after using opensearchclient.search and then the "aggregations" list method, I receive this (image attached).
If I try to get the "WEB" from the Map, there is no other "eventDate" aggregation to fetch.
Is there a way to fetch this inner aggregation using opensearch-java client? I had no luck with documentation.
opensearch-java 2.1.0
There is currently no feature like this; there is an open bug, with merged code, but it has not been released yet.
https://github.com/opensearch-project/opensearch-java/issues/197

Mongo upsert do not return modified object _id on update

Mongodb 4.2.15
I'm trying to use Mongo Updates with Aggregation Pipeline.
My request is very big, but here is its core structure
db.runCommand({
"update": "collectionName",
"updates": [
{
"q": { ... },
"u": [
{
"$project": { ... },
"$set": { ... },
"$set": { ... },
"$project": { ... }
}
],
"multi": false,
"upsert": true
}
]
});
After the first execute I receive a result with newly created object's _id
{
"n" : 1,
"nModified" : 0,
"upserted" : [
{
"index" : 0,
"_id" : ObjectId("619997f11501d6eb40c6f64a")
}
],
"opTime" : {
"ts" : Timestamp(1637455857, 61),
"t" : NumberLong(5)
},
"electionId" : ObjectId("7fffffff0000000000000005"),
"ok" : 1.0,
"$clusterTime" : {
"clusterTime" : Timestamp(1637455857, 61),
"signature" : {
"hash" : { "$binary" : "hA0tf5DXMqTNmnVXdMVnpnAKCU0=", "$type" : "00" },
"keyId" : NumberLong(7018546168816730116)
}
},
"operationTime" : Timestamp(1637455857, 61)
}
After the second execution of the same request there is no modified object's _id
{
"n" : 1,
"nModified" : 1,
"opTime" : {
"ts" : Timestamp(1637456057, 19),
"t" : NumberLong(5)
},
"electionId" : ObjectId("7fffffff0000000000000005"),
"ok" : 1.0,
"$clusterTime" : {
"clusterTime" : Timestamp(1637456057, 19),
"signature" : {
"hash" : { "$binary" : "U2yCP6nXUrjBN9ZiLanyl0rgxww=", "$type" : "00" },
"keyId" : NumberLong(7018546168816730116)
}
},
"operationTime" : Timestamp(1637456057, 19)
}
The thing is that my filter conditions do not contain the _id of the object, but I need to return it with the response. I see no useful request configurations. Any suggestions — is it possible to get the _id in the response in the update case?

Search match text in Elasticsearch SpringBoot by using percentage

I'm a new Elasticsearch SpringBoot here. I don't know how to search match text in Elasticsearch SpringBoot by using percentage. For example, I have a text "Hello world". Can I set a percentage of 50% or 70% to match with my text? I try with property minimumShouldMatch already but it seems doesn't work for my case right now.
Can anyone help me, please? Thanks.
You could use a should query: split your search phrase into terms and set minimum_should_match according to your percentage.
Example query
{
"query": {
"bool": {
"should": [
{
"term": {
"my_field": "hello"
}
},
{
"term": {
"my_field": "world"
}
},
{
"term": {
"my_field": "i'm"
}
},
{
"term": {
"my_field": "alive"
}
}
],
"minimum_should_match": 2
}
}
}
Will find hello world, hello alive etc...
To split a text into terms you should use the _analyze API of your index.
Analyze and split terms
POST myindex/_analyze
{
"field": "my_field",
"text": "hello world i'm alive"
}
This gives you a result like the one below to populate your query, and matches the term analyzer with the query, if for example you use a custom analyzer.
{
"tokens" : [
{
"token" : "hello",
"start_offset" : 0,
"end_offset" : 5,
"type" : "<ALPHANUM>",
"position" : 0
},
{
"token" : "world",
"start_offset" : 6,
"end_offset" : 11,
"type" : "<ALPHANUM>",
"position" : 1
},
{
"token" : "i'm",
"start_offset" : 12,
"end_offset" : 15,
"type" : "<ALPHANUM>",
"position" : 2
},
{
"token" : "alive",
"start_offset" : 16,
"end_offset" : 21,
"type" : "<ALPHANUM>",
"position" : 3
}
]
}

Elasticsearch exclude documents containing specific terms

I've indexed documents like the one below in Elasticsearch.
{
"category": "clothing (f)",
"description": "Women's Unstoppable Graphic T-Shirt - Women’s Short Sleeve Shirt",
"name": "Women's Unstoppable Graphic T-Shirt",
"price": "$34.99"
}
There are categories like clothing (m), clothing (f) etc. I am trying to exclude the clothing (m) category items if the search is for female items. The query I am trying is:
{
"query": {
"bool": {
"must": [
{
"match": {
"description": "women's black shirt"
}
}
],
"must_not": [
{
"term": {
"category": "clothing (m)"
}
}
]
}
},
"from": 0,
"size": 50
}
But this is not working as expected. There are always a few clothing (m) documents among the other results. How can I exclude documents which have a particular category?
In order to exclude a specific term (exact match) you will have to use keyword datatype.
Keyword datatypes are typically used for filtering (Find me all blog posts where status is published), for sorting, and for aggregations. Keyword fields are only searchable by their exact value.
Keyword Datatype
Your current query catches clothing (m) in the results because when you indexed your documents they were analyzed with elasticsearch standard analyzer which analyzes clothing (m) as clothing and (m).
In your query you searched for category as text datatype.
Text datatype fields are analyzed, that is they are passed through an analyzer to convert the string into a list of individual terms before being indexed.
Run this command:
POST my_index/_analyze
{
"text": ["clothing (m)"]
}
Results:
{
"tokens" : [
{
"token" : "clothing",
"start_offset" : 0,
"end_offset" : 8,
"type" : "<ALPHANUM>",
"position" : 0
},
{
"token" : "m",
"start_offset" : 10,
"end_offset" : 11,
"type" : "<ALPHANUM>",
"position" : 1
}
]
}
A working example:
Assuming you mappings look like that:
{
"my_index" : {
"mappings" : {
"properties" : {
"category" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
},
"description" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
},
"name" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
},
"price" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
}
}
}
}
}
Let's post a few documents:
POST my_index/_doc/1
{
"category": "clothing (m)",
"description": "Women's Unstoppable Graphic T-Shirt - Women’s Short Sleeve Shirt",
"name": "Women's Unstoppable Graphic T-Shirt",
"price": "$34.99"
}
POST my_index/_doc/2
{
"category": "clothing (f)",
"description": "Women's Unstoppable Graphic T-Shirt - Women’s Short Sleeve Shirt",
"name": "Women's Unstoppable Graphic T-Shirt",
"price": "$34.99"
}
Now our query should look like this:
GET my_index/_search
{
"query": {
"bool": {
"must": {
"match": {
"description": "women's black shirt"
}
},
"filter": {
"bool": {
"must_not": {
"term": {
"category.keyword": "clothing (m)"
}
}
}
}
}
},
"from": 0,
"size": 50
}
The results:
{
"took" : 0,
"timed_out" : false,
"_shards" : {
"total" : 1,
"successful" : 1,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : {
"value" : 1,
"relation" : "eq"
},
"max_score" : 0.43301374,
"hits" : [
{
"_index" : "my_index",
"_type" : "_doc",
"_id" : "1",
"_score" : 0.43301374,
"_source" : {
"category" : "clothing (f)",
"description" : "Women's Unstoppable Graphic T-Shirt - Women’s Short Sleeve Shirt",
"name" : "Women's Unstoppable Graphic T-Shirt",
"price" : "$34.99"
}
}
]
}
}
Results without using keyword
{
"took" : 1,
"timed_out" : false,
"_shards" : {
"total" : 1,
"successful" : 1,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : {
"value" : 2,
"relation" : "eq"
},
"max_score" : 0.43301374,
"hits" : [
{
"_index" : "my_index",
"_type" : "_doc",
"_id" : "1",
"_score" : 0.43301374,
"_source" : {
"category" : "clothing (f)",
"description" : "Women's Unstoppable Graphic T-Shirt - Women’s Short Sleeve Shirt",
"name" : "Women's Unstoppable Graphic T-Shirt",
"price" : "$34.99"
}
},
{
"_index" : "my_index",
"_type" : "_doc",
"_id" : "2",
"_score" : 0.43301374,
"_source" : {
"category" : "clothing (m)",
"description" : "Women's Unstoppable Graphic T-Shirt - Women’s Short Sleeve Shirt",
"name" : "Women's Unstoppable Graphic T-Shirt",
"price" : "$34.99"
}
}
]
}
}
As you can see from the last results we got also clothing (m).
BTW, don't use term queries on text datatype fields. Use match instead.
Hope this helps.

Gets documents and total count of them in single query include pagination

I'm new in mongo and use mongodb aggregation framework for my queries. I need to retrieve some records which satisfy certain conditions(include pagination+sorting) and also get total count of records.
Now, I perform next steps:
Create $match operator
{ "$match" : { "year" : "2012" , "author.authorName" : { "$regex" :
"au" , "$options" : "i"}}}
Added sorting and pagination
{ "$sort" : { "some_field" : -1}} , { "$limit" : 10} , { "$skip" : 0}
After querying I receive the expected result: 10 documents with all fields.
For pagination I need to know the total count of records which satisfy these conditions, in my case 25.
I use next query to get count : { "$match" : { "year" : "2012" , "author.authorName" : { "$regex" : "au" , "$options" : "i"}}} , { "$group" : { "_id" : "$all" , "reviewsCount" : { "$sum" : 1}}} , { "$sort" : { "some_field" : -1}} , { "$limit" : 10} , { "$skip" : 0}
But I don't want to perform two separate queries: one for retrieving documents and second for total counts of records which satisfy certain conditions.
I want do it in one single query and get result in next format:
{
"result" : [
{
"my_documets": [
{
"_id" : ObjectId("512f1f47a411dc06281d98c0"),
"author" : {
"authorName" : "author name1",
"email" : "email1#email.com"
}
},
{
"_id" : ObjectId("512f1f47a411dc06281d98c0"),
"author" : {
"authorName" : "author name2",
"email" : "email2#email.com"
}
}, .......
],
"total" : 25
}
],
"ok" : 1
}
I tried modify the group operator : { "$group" : { "_id" : "$all" , "author" : "$author" "reviewsCount" : { "$sum" : 1}}}
But in this case I got : "exception: the group aggregate field 'author' must be defined as an expression inside an object". If add all fields in _id then reviewsCount always = 1 because all records are different.
Does anybody know how it can be implemented in a single query? Maybe MongoDB has some features or operators for this case? Using two separate queries reduces performance when querying thousands or millions of records. In my application this is a very critical performance issue.
I've been working on this all day and haven't been able to find a solution, so thought i'd turn to the stackoverflow community.
Thanks.
You can try using $facet in the aggregation pipeline as
db.name.aggregate([
{$match:{your match criteria}},
{$facet: {
data: [{$sort: sort},{$skip:skip},{$limit: limit}],
count:[{$group: {_id: null, count: {$sum: 1}}}]
}}
])
In data, you'll get your list with pagination and in the count, count variable will have a total count of matched documents.
Ok, I have one example, but I think it's a really crazy query; I put it here only for fun, but if this example is faster than 2 queries, please tell us about it in the comments.
For this question i create collection called "so", and put into this collection 25 documents like this:
{
"_id" : ObjectId("512fa86cd99d0adda2a744cd"),
"authorName" : "author name1",
"email" : "email1#email.com",
"c" : 1
}
My query use aggregation framework:
db.so.aggregate([
{ $group:
{
_id: 1,
collection: { $push : { "_id": "$_id", "authorName": "$authorName", "email": "$email", "c": "$c" } },
count: { $sum: 1 }
}
},
{ $unwind:
"$collection"
},
{ $project:
{ "_id": "$collection._id", "authorName": "$collection.authorName", "email": "$collection.email", "c": "$collection.c", "count": "$count" }
},
{ $match:
{ c: { $lte: 10 } }
},
{ $sort :
{ c: -1 }
},
{ $skip:
2
},
{ $limit:
3
},
{ $group:
{
_id: "$count",
my_documets: {
$push: {"_id": "$_id", "authorName":"$authorName", "email":"$email", "c":"$c" }
}
}
},
{ $project:
{ "_id": 0, "my_documets": "$my_documets", "total": "$_id" }
}
])
Result for this query:
{
"result" : [
{
"my_documets" : [
{
"_id" : ObjectId("512fa900d99d0adda2a744d4"),
"authorName" : "author name8",
"email" : "email8#email.com",
"c" : 8
},
{
"_id" : ObjectId("512fa900d99d0adda2a744d3"),
"authorName" : "author name7",
"email" : "email7#email.com",
"c" : 7
},
{
"_id" : ObjectId("512fa900d99d0adda2a744d2"),
"authorName" : "author name6",
"email" : "email6#email.com",
"c" : 6
}
],
"total" : 25
}
],
"ok" : 1
}
In the end, I think that for a big collection 2 queries (first for data, second for count) work faster. For example, you can count the total for a collection like this:
db.so.count()
or like this:
db.so.find({},{_id:1}).sort({_id:-1}).count()
I'm not fully sure about the first example, but in the second example we use only a cursor, which means higher speed:
db.so.find({},{_id:1}).sort({_id:-1}).explain()
{
"cursor" : "BtreeCursor _id_ reverse",
"isMultiKey" : false,
"n" : 25,
"nscannedObjects" : 25,
"nscanned" : 25,
"nscannedObjectsAllPlans" : 25,
"nscannedAllPlans" : 25,
"scanAndOrder" : false,
!!!!!>>> "indexOnly" : true, <<<!!!!!
"nYields" : 0,
"nChunkSkips" : 0,
"millis" : 0,
...
}
For completeness (full discussion was on the MongoDB Google Groups) here is the aggregation you want:
db.docs.aggregate( [
{
"$match" : {
"year" : "2012"
}
},
{
"$group" : {
"_id" : null,
"my_documents" : {
"$push" : {
"_id" : "$_id",
"year" : "$year",
"author" : "$author"
}
},
"reviewsCount" : {
"$sum" : 1
}
}
},
{
"$project" : {
"_id" : 0,
"my_documents" : 1,
"total" : "$reviewsCount"
}
}
] )
By the way, you don't need aggregation framework here - you can just use a regular find. You can get count() from a cursor without having to re-query.

Categories

Resources