Skip to content

Commit b006f3b

Browse files
author
Vladimir Dolzhenko
committed
Include size of snapshot in snapshot metadata (#29602)
Adds the difference in the number of files (and their sizes) between the previous and current snapshot. The total number/size reflects the total number/size of files in the snapshot. Closes #18543 (cherry picked from commit 81eb8ba)
1 parent f561bad commit b006f3b

File tree

13 files changed

+512
-120
lines changed

13 files changed

+512
-120
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
[[breaking_70_snapshotstats_changes]]
2+
=== Snapshot stats changes
3+
4+
Snapshot stats details are provided in a new structured way:
5+
6+
* `total` section for all the files that are referenced by the snapshot.
7+
* `incremental` section for those files that actually needed to be copied over as part of the incremental snapshotting.
8+
* In case of a snapshot that's still in progress, there's also a `processed` section for files that are in the process of being copied.
9+
10+
==== Deprecated `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` snapshot stats properties have been removed
11+
12+
* Properties `number_of_files` and `total_size_in_bytes` are removed and should be replaced by values of nested object `total`.
13+
* Properties `processed_files` and `processed_size_in_bytes` are removed and should be replaced by values of nested object `processed`.

docs/reference/modules/snapshots.asciidoc

+56
Original file line numberDiff line numberDiff line change
@@ -563,6 +563,62 @@ GET /_snapshot/my_backup/snapshot_1/_status
563563
// CONSOLE
564564
// TEST[continued]
565565

566+
The output looks similar to the following:
567+
568+
[source,js]
569+
--------------------------------------------------
570+
{
571+
"snapshots": [
572+
{
573+
"snapshot": "snapshot_1",
574+
"repository": "my_backup",
575+
"uuid": "XuBo4l4ISYiVg0nYUen9zg",
576+
"state": "SUCCESS",
577+
"include_global_state": true,
578+
"shards_stats": {
579+
"initializing": 0,
580+
"started": 0,
581+
"finalizing": 0,
582+
"done": 5,
583+
"failed": 0,
584+
"total": 5
585+
},
586+
"stats": {
587+
"incremental": {
588+
"file_count": 8,
589+
"size_in_bytes": 4704
590+
},
591+
"processed": {
592+
"file_count": 7,
593+
"size_in_bytes": 4254
594+
},
595+
"total": {
596+
"file_count": 8,
597+
"size_in_bytes": 4704
598+
},
599+
"start_time_in_millis": 1526280280355,
600+
"time_in_millis": 358,
601+
602+
"number_of_files": 8,
603+
"processed_files": 8,
604+
"total_size_in_bytes": 4704,
605+
"processed_size_in_bytes": 4704
606+
}
607+
}
608+
]
609+
}
610+
--------------------------------------------------
611+
// TESTRESPONSE
612+
613+
The output is composed of different sections. The `stats` sub-object provides details on the number and size of files that were
614+
snapshotted. As snapshots are incremental, copying only the Lucene segments that are not already in the repository,
615+
the `stats` object contains a `total` section for all the files that are referenced by the snapshot, as well as an `incremental` section
616+
for those files that actually needed to be copied over as part of the incremental snapshotting. In case of a snapshot that's still
617+
in progress, there's also a `processed` section that contains information about the files that are in the process of being copied.
618+
619+
_Note_: Properties `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` are used for
620+
backward compatibility reasons with older 5.x and 6.x versions. These fields will be removed in Elasticsearch v7.0.0.
621+
566622
Multiple ids are also supported:
567623

568624
[source,sh]

rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.status/10_basic.yml

+39-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,9 @@ setup:
1111

1212
---
1313
"Get snapshot status":
14-
14+
- skip:
15+
version: " - 6.99.99"
16+
reason: "backporting in progress: https://github.com/elastic/elasticsearch/pull/29602"
1517
- do:
1618
indices.create:
1719
index: test_index
@@ -32,6 +34,42 @@ setup:
3234
snapshot: test_snapshot
3335

3436
- is_true: snapshots
37+
- match: { snapshots.0.snapshot: test_snapshot }
38+
- match: { snapshots.0.state: SUCCESS }
39+
- gt: { snapshots.0.stats.incremental.file_count: 0 }
40+
- gt: { snapshots.0.stats.incremental.size_in_bytes: 0 }
41+
- gt: { snapshots.0.stats.total.file_count: 0 }
42+
- is_true: snapshots.0.stats.start_time_in_millis
43+
- is_true: snapshots.0.stats.time_in_millis
44+
45+
---
46+
"Get snapshot status with BWC fields":
47+
- do:
48+
indices.create:
49+
index: test_index
50+
body:
51+
settings:
52+
number_of_shards: 1
53+
number_of_replicas: 0
54+
55+
- do:
56+
snapshot.create:
57+
repository: test_repo_status_1
58+
snapshot: test_snapshot_bwc
59+
wait_for_completion: true
60+
61+
- do:
62+
snapshot.status:
63+
repository: test_repo_status_1
64+
snapshot: test_snapshot_bwc
65+
66+
- is_true: snapshots
67+
- match: { snapshots.0.snapshot: test_snapshot_bwc }
68+
- match: { snapshots.0.state: SUCCESS }
69+
- gt: { snapshots.0.stats.number_of_files: 0 }
70+
- gt: { snapshots.0.stats.processed_files: 0 }
71+
- gt: { snapshots.0.stats.total_size_in_bytes: 0 }
72+
- gt: { snapshots.0.stats.processed_size_in_bytes: 0 }
3573

3674
---
3775
"Get missing snapshot status throws an exception":

server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,8 @@ private SnapshotIndexShardStatus() {
7474
throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage());
7575
}
7676
this.stats = new SnapshotStats(indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(),
77-
indexShardStatus.getNumberOfFiles(), indexShardStatus.getProcessedFiles(),
78-
indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
77+
indexShardStatus.getIncrementalFileCount(), indexShardStatus.getTotalFileCount(), indexShardStatus.getProcessedFileCount(),
78+
indexShardStatus.getIncrementalSize(), indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
7979
this.failure = indexShardStatus.getFailure();
8080
this.nodeId = nodeId;
8181
}

server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java

+103-32
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919

2020
package org.elasticsearch.action.admin.cluster.snapshots.status;
2121

22+
import org.elasticsearch.Version;
2223
import org.elasticsearch.common.io.stream.StreamInput;
2324
import org.elasticsearch.common.io.stream.StreamOutput;
2425
import org.elasticsearch.common.io.stream.Streamable;
@@ -34,19 +35,25 @@ public class SnapshotStats implements Streamable, ToXContentFragment {
3435

3536
private long startTime;
3637
private long time;
37-
private int numberOfFiles;
38-
private int processedFiles;
38+
private int incrementalFileCount;
39+
private int totalFileCount;
40+
private int processedFileCount;
41+
private long incrementalSize;
3942
private long totalSize;
4043
private long processedSize;
4144

4245
SnapshotStats() {
4346
}
4447

45-
SnapshotStats(long startTime, long time, int numberOfFiles, int processedFiles, long totalSize, long processedSize) {
48+
SnapshotStats(long startTime, long time,
49+
int incrementalFileCount, int totalFileCount, int processedFileCount,
50+
long incrementalSize, long totalSize, long processedSize) {
4651
this.startTime = startTime;
4752
this.time = time;
48-
this.numberOfFiles = numberOfFiles;
49-
this.processedFiles = processedFiles;
53+
this.incrementalFileCount = incrementalFileCount;
54+
this.totalFileCount = totalFileCount;
55+
this.processedFileCount = processedFileCount;
56+
this.incrementalSize = incrementalSize;
5057
this.totalSize = totalSize;
5158
this.processedSize = processedSize;
5259
}
@@ -66,17 +73,31 @@ public long getTime() {
6673
}
6774

6875
/**
69-
* Returns number of files in the snapshot
76+
* Returns incremental file count of the snapshot
7077
*/
71-
public int getNumberOfFiles() {
72-
return numberOfFiles;
78+
public int getIncrementalFileCount() {
79+
return incrementalFileCount;
80+
}
81+
82+
/**
83+
* Returns total number of files in the snapshot
84+
*/
85+
public int getTotalFileCount() {
86+
return totalFileCount;
7387
}
7488

7589
/**
7690
* Returns number of files in the snapshot that were processed so far
7791
*/
78-
public int getProcessedFiles() {
79-
return processedFiles;
92+
public int getProcessedFileCount() {
93+
return processedFileCount;
94+
}
95+
96+
/**
97+
* Return incremental files size of the snapshot
98+
*/
99+
public long getIncrementalSize() {
100+
return incrementalSize;
80101
}
81102

82103
/**
@@ -105,59 +126,109 @@ public void writeTo(StreamOutput out) throws IOException {
105126
out.writeVLong(startTime);
106127
out.writeVLong(time);
107128

108-
out.writeVInt(numberOfFiles);
109-
out.writeVInt(processedFiles);
129+
out.writeVInt(incrementalFileCount);
130+
out.writeVInt(processedFileCount);
110131

111-
out.writeVLong(totalSize);
132+
out.writeVLong(incrementalSize);
112133
out.writeVLong(processedSize);
134+
135+
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
136+
out.writeVInt(totalFileCount);
137+
out.writeVLong(totalSize);
138+
}
113139
}
114140

115141
@Override
116142
public void readFrom(StreamInput in) throws IOException {
117143
startTime = in.readVLong();
118144
time = in.readVLong();
119145

120-
numberOfFiles = in.readVInt();
121-
processedFiles = in.readVInt();
146+
incrementalFileCount = in.readVInt();
147+
processedFileCount = in.readVInt();
122148

123-
totalSize = in.readVLong();
149+
incrementalSize = in.readVLong();
124150
processedSize = in.readVLong();
151+
152+
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
153+
totalFileCount = in.readVInt();
154+
totalSize = in.readVLong();
155+
} else {
156+
totalFileCount = incrementalFileCount;
157+
totalSize = incrementalSize;
158+
}
125159
}
126160

127161
static final class Fields {
128162
static final String STATS = "stats";
163+
164+
static final String INCREMENTAL = "incremental";
165+
static final String PROCESSED = "processed";
166+
static final String TOTAL = "total";
167+
168+
static final String FILE_COUNT = "file_count";
169+
static final String SIZE = "size";
170+
static final String SIZE_IN_BYTES = "size_in_bytes";
171+
172+
static final String START_TIME_IN_MILLIS = "start_time_in_millis";
173+
static final String TIME_IN_MILLIS = "time_in_millis";
174+
static final String TIME = "time";
175+
176+
// BWC
129177
static final String NUMBER_OF_FILES = "number_of_files";
130178
static final String PROCESSED_FILES = "processed_files";
131-
static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes";
132179
static final String TOTAL_SIZE = "total_size";
180+
static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes";
133181
static final String PROCESSED_SIZE_IN_BYTES = "processed_size_in_bytes";
134182
static final String PROCESSED_SIZE = "processed_size";
135-
static final String START_TIME_IN_MILLIS = "start_time_in_millis";
136-
static final String TIME_IN_MILLIS = "time_in_millis";
137-
static final String TIME = "time";
183+
138184
}
139185

140186
@Override
141187
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
142-
builder.startObject(Fields.STATS);
143-
builder.field(Fields.NUMBER_OF_FILES, getNumberOfFiles());
144-
builder.field(Fields.PROCESSED_FILES, getProcessedFiles());
145-
builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getTotalSize()));
146-
builder.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()));
147-
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime());
148-
builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
149-
builder.endObject();
150-
return builder;
188+
builder.startObject(Fields.STATS)
189+
// incremental starts
190+
.startObject(Fields.INCREMENTAL)
191+
.field(Fields.FILE_COUNT, getIncrementalFileCount())
192+
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getIncrementalSize()))
193+
// incremental ends
194+
.endObject();
195+
196+
if (getProcessedFileCount() != getIncrementalFileCount()) {
197+
// processed starts
198+
builder.startObject(Fields.PROCESSED)
199+
.field(Fields.FILE_COUNT, getProcessedFileCount())
200+
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getProcessedSize()))
201+
// processed ends
202+
.endObject();
203+
}
204+
// total starts
205+
builder.startObject(Fields.TOTAL)
206+
.field(Fields.FILE_COUNT, getTotalFileCount())
207+
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalSize()))
208+
// total ends
209+
.endObject();
210+
// timings stats
211+
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime())
212+
.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
213+
214+
// BWC part
215+
return builder.field(Fields.NUMBER_OF_FILES, getIncrementalFileCount())
216+
.field(Fields.PROCESSED_FILES, getProcessedFileCount())
217+
.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getIncrementalSize()))
218+
.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()))
219+
// BWC part ends
220+
.endObject();
151221
}
152222

153223
void add(SnapshotStats stats) {
154-
numberOfFiles += stats.numberOfFiles;
155-
processedFiles += stats.processedFiles;
224+
incrementalFileCount += stats.incrementalFileCount;
225+
totalFileCount += stats.totalFileCount;
226+
processedFileCount += stats.processedFileCount;
156227

228+
incrementalSize += stats.incrementalSize;
157229
totalSize += stats.totalSize;
158230
processedSize += stats.processedSize;
159231

160-
161232
if (startTime == 0) {
162233
// First time here
163234
startTime = stats.startTime;

0 commit comments

Comments
 (0)