Skip to content

Commit d8414ff

Browse files
committed
CONSOLEify percentile and percentile-ranks docs
Related #18160
1 parent 268923e commit d8414ff

File tree

3 files changed

+136
-59
lines changed

3 files changed

+136
-59
lines changed

docs/build.gradle

Lines changed: 32 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,6 @@ buildRestTests.expectedUnconvertedCandidates = [
3131
'reference/aggregations/bucket/significantterms-aggregation.asciidoc',
3232
'reference/aggregations/bucket/terms-aggregation.asciidoc',
3333
'reference/aggregations/matrix/stats-aggregation.asciidoc',
34-
'reference/aggregations/metrics/percentile-aggregation.asciidoc',
35-
'reference/aggregations/metrics/percentile-rank-aggregation.asciidoc',
3634
'reference/aggregations/metrics/scripted-metric-aggregation.asciidoc',
3735
'reference/aggregations/metrics/tophits-aggregation.asciidoc',
3836
'reference/cluster/allocation-explain.asciidoc',
@@ -476,3 +474,35 @@ buildRestTests.setups['analyze_sample'] = '''
476474
properties:
477475
obj1.field1:
478476
type: text'''
477+
478+
// Used by percentile/percentile-rank aggregations
479+
buildRestTests.setups['latency'] = '''
480+
- do:
481+
indices.create:
482+
index: latency
483+
body:
484+
settings:
485+
number_of_shards: 1
486+
number_of_replicas: 1
487+
mappings:
488+
data:
489+
properties:
490+
load_time:
491+
type: long
492+
- do:
493+
bulk:
494+
index: latency
495+
type: data
496+
refresh: true
497+
body: |'''
498+
499+
500+
for (int i = 0; i < 100; i++) {
501+
def value = i
502+
if (i % 10) {
503+
value = i*10
504+
}
505+
buildRestTests.setups['latency'] += """
506+
{"index":{}}
507+
{"load_time": "$value"}"""
508+
}

docs/reference/aggregations/metrics/percentile-aggregation.asciidoc

Lines changed: 54 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,9 @@ Let's look at a range of percentiles representing load time:
2626

2727
[source,js]
2828
--------------------------------------------------
29+
GET latency/data/_search
2930
{
31+
"size": 0,
3032
"aggs" : {
3133
"load_time_outlier" : {
3234
"percentiles" : {
@@ -36,6 +38,8 @@ Let's look at a range of percentiles representing load time:
3638
}
3739
}
3840
--------------------------------------------------
41+
// CONSOLE
42+
// TEST[setup:latency]
3943
<1> The field `load_time` must be a numeric field
4044

4145
By default, the `percentile` metric will generate a range of
@@ -49,18 +53,19 @@ percentiles: `[ 1, 5, 25, 50, 75, 95, 99 ]`. The response will look like this:
4953
"aggregations": {
5054
"load_time_outlier": {
5155
"values" : {
52-
"1.0": 15,
53-
"5.0": 20,
54-
"25.0": 23,
55-
"50.0": 25,
56-
"75.0": 29,
57-
"95.0": 60,
58-
"99.0": 150
56+
"1.0": 9.9,
57+
"5.0": 29.500000000000004,
58+
"25.0": 167.5,
59+
"50.0": 445.0,
60+
"75.0": 722.5,
61+
"95.0": 940.5,
62+
"99.0": 980.1000000000001
5963
}
6064
}
6165
}
6266
}
6367
--------------------------------------------------
68+
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
6469

6570
As you can see, the aggregation will return a calculated value for each percentile
6671
in the default range. If we assume response times are in milliseconds, it is
@@ -73,7 +78,9 @@ must be a value between 0-100 inclusive):
7378

7479
[source,js]
7580
--------------------------------------------------
81+
GET latency/data/_search
7682
{
83+
"size": 0,
7784
"aggs" : {
7885
"load_time_outlier" : {
7986
"percentiles" : {
@@ -84,6 +91,8 @@ must be a value between 0-100 inclusive):
8491
}
8592
}
8693
--------------------------------------------------
94+
// CONSOLE
95+
// TEST[setup:latency]
8796
<1> Use the `percents` parameter to specify particular percentiles to calculate
8897

8998
==== Keyed Response
@@ -92,20 +101,21 @@ By default the `keyed` flag is set to `true` which associates a unique string ke
92101

93102
[source,js]
94103
--------------------------------------------------
95-
POST bank/account/_search?size=0
104+
GET latency/data/_search
96105
{
106+
"size": 0,
97107
"aggs": {
98-
"balance_outlier": {
108+
"load_time_outlier": {
99109
"percentiles": {
100-
"field": "balance",
110+
"field": "load_time",
101111
"keyed": false
102112
}
103113
}
104114
}
105115
}
106116
--------------------------------------------------
107117
// CONSOLE
108-
// TEST[setup:bank]
118+
// TEST[setup:latency]
109119

110120
Response:
111121

@@ -115,49 +125,42 @@ Response:
115125
...
116126
117127
"aggregations": {
118-
"balance_outlier": {
128+
"load_time_outlier": {
119129
"values": [
120130
{
121131
"key": 1.0,
122-
"value": 1462.8400000000001
132+
"value": 9.9
123133
},
124134
{
125135
"key": 5.0,
126-
"value": 3591.85
136+
"value": 29.500000000000004
127137
},
128138
{
129139
"key": 25.0,
130-
"value": 13709.333333333334
140+
"value": 167.5
131141
},
132142
{
133143
"key": 50.0,
134-
"value": 26020.11666666667
144+
"value": 445.0
135145
},
136146
{
137147
"key": 75.0,
138-
"value": 38139.648148148146
148+
"value": 722.5
139149
},
140150
{
141151
"key": 95.0,
142-
"value": 47551.549999999996
152+
"value": 940.5
143153
},
144154
{
145155
"key": 99.0,
146-
"value": 49339.16
156+
"value": 980.1000000000001
147157
}
148158
]
149159
}
150160
}
151161
}
152162
--------------------------------------------------
153163
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
154-
// TESTRESPONSE[s/1462.8400000000001/$body.aggregations.balance_outlier.values.0.value/]
155-
// TESTRESPONSE[s/3591.85/$body.aggregations.balance_outlier.values.1.value/]
156-
// TESTRESPONSE[s/13709.333333333334/$body.aggregations.balance_outlier.values.2.value/]
157-
// TESTRESPONSE[s/26020.11666666667/$body.aggregations.balance_outlier.values.3.value/]
158-
// TESTRESPONSE[s/38139.648148148146/$body.aggregations.balance_outlier.values.4.value/]
159-
// TESTRESPONSE[s/47551.549999999996/$body.aggregations.balance_outlier.values.5.value/]
160-
// TESTRESPONSE[s/49339.16/$body.aggregations.balance_outlier.values.6.value/]
161164

162165
==== Script
163166

@@ -167,7 +170,9 @@ a script to convert them on-the-fly:
167170

168171
[source,js]
169172
--------------------------------------------------
173+
GET latency/data/_search
170174
{
175+
"size": 0,
171176
"aggs" : {
172177
"load_time_outlier" : {
173178
"percentiles" : {
@@ -183,6 +188,9 @@ a script to convert them on-the-fly:
183188
}
184189
}
185190
--------------------------------------------------
191+
// CONSOLE
192+
// TEST[setup:latency]
193+
186194
<1> The `field` parameter is replaced with a `script` parameter, which uses the
187195
script to generate values which percentiles are calculated on
188196
<2> Scripting supports parameterized input just like any other script
@@ -191,21 +199,25 @@ This will interpret the `script` parameter as an `inline` script with the `painl
191199

192200
[source,js]
193201
--------------------------------------------------
202+
GET latency/data/_search
194203
{
204+
"size": 0,
195205
"aggs" : {
196206
"load_time_outlier" : {
197207
"percentiles" : {
198208
"script" : {
199209
"id": "my_script",
200-
"params" : {
201-
"timeUnit" : 1000
210+
"params": {
211+
"field": "load_time"
202212
}
203213
}
204214
}
205215
}
206216
}
207217
}
208218
--------------------------------------------------
219+
// CONSOLE
220+
// TEST[setup:latency,stored_example_script]
209221

210222
[[search-aggregations-metrics-percentile-aggregation-approximation]]
211223
==== Percentiles are (usually) approximate
@@ -252,7 +264,9 @@ This balance can be controlled using a `compression` parameter:
252264

253265
[source,js]
254266
--------------------------------------------------
267+
GET latency/data/_search
255268
{
269+
"size": 0,
256270
"aggs" : {
257271
"load_time_outlier" : {
258272
"percentiles" : {
@@ -265,6 +279,9 @@ This balance can be controlled using a `compression` parameter:
265279
}
266280
}
267281
--------------------------------------------------
282+
// CONSOLE
283+
// TEST[setup:latency]
284+
268285
<1> Compression controls memory usage and approximation error
269286

270287
The TDigest algorithm uses a number of "nodes" to approximate percentiles -- the
@@ -298,7 +315,9 @@ The HDR Histogram can be used by specifying the `method` parameter in the reques
298315

299316
[source,js]
300317
--------------------------------------------------
318+
GET latency/data/_search
301319
{
320+
"size": 0,
302321
"aggs" : {
303322
"load_time_outlier" : {
304323
"percentiles" : {
@@ -312,6 +331,9 @@ The HDR Histogram can be used by specifying the `method` parameter in the reques
312331
}
313332
}
314333
--------------------------------------------------
334+
// CONSOLE
335+
// TEST[setup:latency]
336+
315337
<1> `hdr` object indicates that HDR Histogram should be used to calculate the percentiles and specific settings for this algorithm can be specified inside the object
316338
<2> `number_of_significant_value_digits` specifies the resolution of values for the histogram in number of significant digits
317339

@@ -326,7 +348,9 @@ had a value.
326348

327349
[source,js]
328350
--------------------------------------------------
351+
GET latency/data/_search
329352
{
353+
"size": 0,
330354
"aggs" : {
331355
"grade_percentiles" : {
332356
"percentiles" : {
@@ -337,5 +361,7 @@ had a value.
337361
}
338362
}
339363
--------------------------------------------------
364+
// CONSOLE
365+
// TEST[setup:latency]
340366

341367
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

0 commit comments

Comments (0)