Skip to content

Commit 5de2bf6

Browse files
Load tracks from external file
With this commit we load all tracks from an external file. Closes elastic#32
1 parent 3fa9802 commit 5de2bf6

File tree

5 files changed

+410
-384
lines changed

5 files changed

+410
-384
lines changed

README.md

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,13 @@
11
## Night Rally
22

3-
Night Rally is a set of driver scripts for [running nightly macrobenchmarks for Elasticsearch](https://elasticsearch-ci.elastic.co/view/All/job/elastic+elasticsearch+master+macrobenchmark-periodic). The macrobenchmarks themselves are executed by [Rally](https://github.com/elastic/rally) and [publishing the results](https://elasticsearch-benchmarks.elastic.co/).
3+
Night Rally is a set of driver scripts for [running nightly macrobenchmarks for Elasticsearch](https://elasticsearch-ci.elastic.co/view/All/job/elastic+elasticsearch+master+macrobenchmark-periodic) and [publishing the results](https://elasticsearch-benchmarks.elastic.co/). The macrobenchmarks themselves are executed by [Rally](https://github.com/elastic/rally).
44

55
### Prerequisites
66

77
* Python 3.4+ available as `python3` on the path (verify with: `python3 --version` which should print `Python 3.4.0` (or higher))
88
* `pip3` available on the path (verify with `pip3 --version`)
99
* `awscli` available on the command line and properly set up to write to the bucket `s3://elasticsearch-benchmarks.elasticsearch.org`.
10+
* `Ansible` available on the command line (only needed in our nightly benchmarking environment)
1011
* All prerequisites for [Rally](https://github.com/elastic/rally)
1112

1213
Night Rally is only tested on Mac OS X and Linux.
@@ -16,7 +17,7 @@ Night Rally is only tested on Mac OS X and Linux.
1617
1. Clone this repo: `git clone git@github.com:elastic/night-rally.git`
1718
2. Ensure that all prerequisites of [Rally](https://github.com/elastic/rally) are properly setup. Hint. It is *not* required to install Rally manually. Just ensure that its prerequisites are installed.
1819

19-
Now you can invoke night_rally regularly with the startup script `night_rally.sh` e.g. via cron. The script can also self-update if invoked as `night_rally.sh --self-update`.
20+
Now you can invoke Night Rally regularly with the startup script `night_rally.sh` e.g. via cron. The script can also self-update if invoked as `night_rally.sh --self-update`.
2021

2122

2223
### How do I ...?
@@ -50,9 +51,10 @@ For more details, please issue `python3 admin.py delete annotation --help`.
5051

5152
The following steps are necessary to add a new track:
5253

53-
1. Copy a directory in `external/pages` and adjust the names accordingly.
54-
2. Adjust the menu structure in all other files (if this happens more often, we should think about using a template engine for that...)
55-
3. Add your track and the challenges to run in the `tracks` hash in `night_rally.py`
54+
1. Add your track and the challenges to run in `resources/tracks.json`
55+
2. Create nightly and release charts and the corresponding dashboards on the Kibana instance https://ece78408d8df7290d4ad6e3ffac5af6a.us-east-1.aws.found.io (it's mapped to be publicly reachable). You can generate release charts with `python3 generate_release_charts.py YOUR_TRACK_NAME`. At the moment there is no such generator for nightly charts though.
56+
3. Copy a directory in `external/pages`, adjust the names accordingly and reference the UUID of the dashboards that you've created in step 2.
57+
4. Adjust the menu structure in all other files (if this happens more often, we should think about using a template engine for that...)
5658

5759
If you're finished, please submit a PR. After the PR is merged, the new track will show up after the next benchmark.
5860

@@ -61,11 +63,11 @@ If you're finished, please submit a PR. After the PR is merged, the new track wi
6163

6264
Suppose we want to publish a new release benchmark of the Elasticsearch release `5.3.1` on our benchmark page. To do that, start a new [macrobenchmark build](https://elasticsearch-ci.elastic.co/view/All/job/elastic+elasticsearch+master+macrobenchmark-periodic/) with the following parameters:
6365

64-
* MODE: release
65-
* RELEASE: 5.3.1
66-
* TARGET_HOST: Just use the default value
66+
* `MODE`: `release`
67+
* `RELEASE`: `5.3.1`
68+
* `TARGET_HOST`: Just use the default value
6769

68-
The results will show up automatically as soon as the build is finished
70+
The results will show up automatically as soon as the build is finished.
6971

7072
#### Run an ad-hoc benchmark
7173

generate_release_charts.py

Lines changed: 55 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -1,58 +1,7 @@
11
import uuid
22
import json
33
import os
4-
5-
all_tracks_and_challenges = {
6-
"nested": [
7-
["nested-search-challenge", "4gheap"],
8-
],
9-
"geonames": [
10-
["append-no-conflicts", "defaults"],
11-
["append-no-conflicts-index-only", "4gheap"],
12-
["append-sorted-no-conflicts", "4gheap"],
13-
["append-fast-with-conflicts", "4gheap"],
14-
["append-no-conflicts-index-only-1-replica", "two_nodes"],
15-
],
16-
"percolator": [
17-
["append-no-conflicts", "4gheap"],
18-
],
19-
"geopoint": [
20-
["append-no-conflicts", "defaults"],
21-
["append-no-conflicts-index-only", "4gheap"],
22-
["append-fast-with-conflicts", "4gheap"],
23-
["append-no-conflicts-index-only-1-replica", "two_nodes"],
24-
],
25-
"pmc": [
26-
["append-no-conflicts-index-only", "defaults"],
27-
["append-no-conflicts", "4gheap"],
28-
["append-sorted-no-conflicts", "4gheap"],
29-
["append-fast-with-conflicts", "4gheap"],
30-
["append-no-conflicts-index-only-1-replica", "two_nodes"],
31-
],
32-
"nyc_taxis": [
33-
["append-no-conflicts", "4gheap"],
34-
["append-sorted-no-conflicts-index-only", "4gheap"],
35-
],
36-
"logging": [
37-
["append-no-conflicts-index-only", "defaults"],
38-
["append-no-conflicts", "4gheap"],
39-
["append-sorted-no-conflicts", "4gheap"],
40-
],
41-
"noaa": [
42-
["append-no-conflicts", "defaults"]
43-
]
44-
}
45-
46-
defaults = {
47-
"geonames": ("append-no-conflicts", "defaults"),
48-
"percolator": ("append-no-conflicts", "4gheap"),
49-
"geopoint": ("append-no-conflicts", "defaults"),
50-
"pmc": ("append-no-conflicts", "4gheap"),
51-
"nyc_taxis": ("append-no-conflicts", "4gheap"),
52-
"nested": ("nested-search-challenge", "4gheap"),
53-
"logging": ("append-no-conflicts", "4gheap"),
54-
"noaa": ("append-no-conflicts", "defaults"),
55-
}
4+
import sys
565

576

587
def challenges(track_name):
@@ -69,18 +18,20 @@ def find_challenge(all_challenges, name):
6918
return None
7019

7120

72-
def generate_index_ops():
21+
def generate_index_ops(tracks):
7322
def tracks_for_index():
7423
all_tracks = []
75-
for track, challenges_cars in all_tracks_and_challenges.items():
24+
for track_structure in tracks:
25+
track = track_structure["track"]
7626
challenges_of_track = challenges(track)
77-
cci = []
78-
for cc in challenges_cars:
79-
challenge = cc[0]
80-
car = cc[1]
81-
index_op = find_challenge(challenges_of_track, challenge)["schedule"][0]["operation"]
82-
cci.append((challenge, car, index_op))
83-
all_tracks.append((track, cci))
27+
for combination in track_structure["combinations"]:
28+
if combination.get("release-charts", True):
29+
challenge = combination["challenge"]
30+
car = combination["car"]
31+
cci = []
32+
index_op = find_challenge(challenges_of_track, challenge)["schedule"][0]["operation"]
33+
cci.append((challenge, car, index_op))
34+
all_tracks.append((track, cci))
8435
return all_tracks
8536

8637
structures = []
@@ -93,7 +44,8 @@ def tracks_for_index():
9344
if idx > 0:
9445
filters = filters + ","
9546
label = "%s-%s" % (challenge, car)
96-
filters = filters + "{\"input\":{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"operation:%s AND challenge:%s AND car:%s\"}}},\"label\":\"%s\"}" % (index_op, challenge, car, label)
47+
filters = filters + "{\"input\":{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"operation:%s AND challenge:%s AND car:%s\"}}},\"label\":\"%s\"}" % (
48+
index_op, challenge, car, label)
9749

9850
s = {
9951
"_id": str(uuid.uuid4()),
@@ -115,7 +67,8 @@ def tracks_for_index():
11567
"\"histogram\",\"valueAxis\":\"ValueAxis-1\"}],\"setYExtents\":false,\"showCircles\":true,\"times\":[],"
11668
"\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"labels\":{\"filter\":false,\"rotate\":0,\"show\":true,\"truncate\":100},"
11769
"\"name\":\"LeftAxis-1\",\"position\":\"left\",\"scale\":{\"mode\":\"normal\",\"type\":\"linear\"},\"show\":true,"
118-
"\"style\":{},\"title\":{\"text\":\"Median Indexing Throughput [docs/s]\"},\"type\":\"value\"}]},\"title\":\"%s\",\"type\":\"histogram\"}" % (filters, title),
70+
"\"style\":{},\"title\":{\"text\":\"Median Indexing Throughput [docs/s]\"},\"type\":\"value\"}]},\"title\":\"%s\",\"type\":\"histogram\"}" % (
71+
filters, title),
11972
"uiStateJSON": "{\"vis\":{\"legendOpen\":true}}",
12073
"description": "",
12174
"version": 1,
@@ -129,25 +82,29 @@ def tracks_for_index():
12982
return structures
13083

13184

132-
def default_tracks():
85+
def default_tracks(tracks):
13386
all_tracks = []
134-
for track, challenge_car in defaults.items():
135-
challenge, car = challenge_car
136-
default_challenge = challenges(track)[0]
137-
# default challenge is usually the first one. No need for complex logic
138-
assert default_challenge["default"]
139-
# filter queries
140-
queries = [t["operation"] for t in default_challenge["schedule"] if
141-
not (t["operation"].startswith("index") or t["operation"] in ["force-merge", "node-stats"])]
142-
all_tracks.append((track, challenge, car, queries))
87+
for track_structure in tracks:
88+
for combination in track_structure["combinations"]:
89+
if combination.get("default-combination", False):
90+
track = track_structure["track"]
91+
challenge = combination["challenge"]
92+
car = combination["car"]
93+
default_challenge = challenges(track)[0]
94+
# default challenge is usually the first one. No need for complex logic
95+
assert default_challenge["default"]
96+
# filter queries
97+
queries = [t["operation"] for t in default_challenge["schedule"] if
98+
not (t["operation"].startswith("index") or t["operation"] in ["force-merge", "node-stats"])]
99+
all_tracks.append((track, challenge, car, queries))
143100

144101
return all_tracks
145102

146103

147-
def generate_queries():
104+
def generate_queries(tracks):
148105
# output JSON structures
149106
structures = []
150-
for track, challenge, car, queries in default_tracks():
107+
for track, challenge, car, queries in default_tracks(tracks):
151108
for q in queries:
152109
title = "release-%s-%s-p99-latency" % (track, q)
153110
label = "Query Latency [ms]"
@@ -189,10 +146,10 @@ def generate_queries():
189146
return structures
190147

191148

192-
def generate_io():
149+
def generate_io(tracks):
193150
# output JSON structures
194151
structures = []
195-
for track, challenge, car, queries in default_tracks():
152+
for track, challenge, car, queries in default_tracks(tracks):
196153
title = "release-%s-io" % track
197154

198155
s = {
@@ -205,7 +162,8 @@ def generate_io():
205162
"description": "",
206163
"version": 1,
207164
"kibanaSavedObjectMeta": {
208-
"searchSourceJSON": "{\"index\":\"rally-results-*\",\"query\":{\"query_string\":{\"query\":\"environment:release AND active:true AND track:%s AND challenge:%s AND car:%s\",\"analyze_wildcard\":true}},\"filter\":[]}" % (track, challenge, car)
165+
"searchSourceJSON": "{\"index\":\"rally-results-*\",\"query\":{\"query_string\":{\"query\":\"environment:release AND active:true AND track:%s AND challenge:%s AND car:%s\",\"analyze_wildcard\":true}},\"filter\":[]}" % (
166+
track, challenge, car)
209167
}
210168
}
211169
}
@@ -214,9 +172,9 @@ def generate_io():
214172
return structures
215173

216174

217-
def generate_gc():
175+
def generate_gc(tracks):
218176
structures = []
219-
for track, challenge, car, queries in default_tracks():
177+
for track, challenge, car, queries in default_tracks(tracks):
220178
title = "release-%s-gc" % track
221179
s = {
222180
"_id": str(uuid.uuid4()),
@@ -228,7 +186,8 @@ def generate_gc():
228186
"description": "",
229187
"version": 1,
230188
"kibanaSavedObjectMeta": {
231-
"searchSourceJSON": "{\"index\":\"rally-results-*\",\"query\":{\"query_string\":{\"query\":\"environment:release AND active:true AND track:%s AND challenge:%s AND car:%s\",\"analyze_wildcard\":true}},\"filter\":[]}" % (track, challenge, car)
189+
"searchSourceJSON": "{\"index\":\"rally-results-*\",\"query\":{\"query_string\":{\"query\":\"environment:release AND active:true AND track:%s AND challenge:%s AND car:%s\",\"analyze_wildcard\":true}},\"filter\":[]}" % (
190+
track, challenge, car)
232191
}
233192
}
234193
}
@@ -237,8 +196,22 @@ def generate_gc():
237196
return structures
238197

239198

199+
def load_tracks(track_filter):
    """Load all track definitions from ``resources/tracks.json`` next to this script.

    :param track_filter: Optional name of a single track. When provided, only track
        structures whose ``"track"`` key equals this name are returned; otherwise all
        tracks are returned.
    :return: A list of track structure dicts (possibly empty if the filter matches nothing).
    """
    # Resolve relative to this file, not the CWD, so the script works from anywhere.
    root = os.path.dirname(os.path.realpath(__file__))
    # NOTE: relies on the module-level "import json"; no local re-import needed.
    with open(os.path.join(root, "resources", "tracks.json"), "r") as tracks_file:
        all_tracks = json.load(tracks_file)
    if track_filter:
        return [t for t in all_tracks if t["track"] == track_filter]
    else:
        return all_tracks
208+
209+
240210
def main():
    """Generate all release chart structures and print them as a JSON array on stdout."""
    # An optional first CLI argument restricts generation to a single track.
    track_filter = None
    if len(sys.argv) > 1:
        track_filter = sys.argv[1]
    tracks = load_tracks(track_filter)

    charts = []
    for generator in (generate_index_ops, generate_queries, generate_io, generate_gc):
        charts += generator(tracks)
    print(json.dumps(charts, indent=4))
243216

244217

0 commit comments

Comments
 (0)