Skip to content

Support max_single_primary_size in Resize Action and exposed in ILM #67705

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 11 commits into from
Jan 29, 2021
Merged
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,12 @@
*/
package org.elasticsearch.client.ilm;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
Expand All @@ -31,31 +34,53 @@
public class ShrinkAction implements LifecycleAction, ToXContentObject {
public static final String NAME = "shrink";
private static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards");
private static final ParseField MAX_SINGLE_PRIMARY_SIZE = new ParseField("max_single_primary_size");

private static final ConstructingObjectParser<ShrinkAction, Void> PARSER =
new ConstructingObjectParser<>(NAME, true, a -> new ShrinkAction((Integer) a[0]));
new ConstructingObjectParser<>(NAME, true, a -> new ShrinkAction((Integer) a[0], (ByteSizeValue) a[1]));

static {
PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD);
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_SHARDS_FIELD);
PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
(p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SINGLE_PRIMARY_SIZE.getPreferredName()),
MAX_SINGLE_PRIMARY_SIZE, ObjectParser.ValueType.STRING);
}

private int numberOfShards;
private Integer numberOfShards;
private ByteSizeValue maxSinglePrimarySize;

public static ShrinkAction parse(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}

public ShrinkAction(int numberOfShards) {
if (numberOfShards <= 0) {
throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
public ShrinkAction(@Nullable Integer numberOfShards, ByteSizeValue maxSinglePrimarySize) {
if (numberOfShards != null && maxSinglePrimarySize != null) {
throw new IllegalArgumentException("Cannot set both [number_of_shards] and [max_single_primary_size]");
}
if (numberOfShards == null && maxSinglePrimarySize == null) {
throw new IllegalArgumentException("Either [number_of_shards] or [max_single_primary_size] must be set");
}
if (maxSinglePrimarySize != null) {
if (maxSinglePrimarySize.getBytes() <= 0) {
throw new IllegalArgumentException("[max_single_primary_size] must be greater than 0");
}
this.maxSinglePrimarySize = maxSinglePrimarySize;
} else {
if (numberOfShards <= 0) {
throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
}
this.numberOfShards = numberOfShards;
}
this.numberOfShards = numberOfShards;
}

int getNumberOfShards() {
Integer getNumberOfShards() {
return numberOfShards;
}

// Package-private accessor for tests/serialization helpers.
// Returns the configured max single primary shard size, or null when the action
// was configured with number_of_shards instead (the constructor enforces that
// exactly one of the two options is set).
ByteSizeValue getMaxSinglePrimarySize() {
return maxSinglePrimarySize;
}

@Override
public String getName() {
return NAME;
Expand All @@ -64,7 +89,12 @@ public String getName() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
if (numberOfShards != null) {
builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
}
if (maxSinglePrimarySize != null) {
builder.field(MAX_SINGLE_PRIMARY_SIZE.getPreferredName(), maxSinglePrimarySize);
}
builder.endObject();
return builder;
}
Expand All @@ -74,12 +104,14 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ShrinkAction that = (ShrinkAction) o;
return Objects.equals(numberOfShards, that.numberOfShards);

return Objects.equals(numberOfShards, that.numberOfShards) &&
Objects.equals(maxSinglePrimarySize, that.maxSinglePrimarySize);
}

@Override
public int hashCode() {
return Objects.hash(numberOfShards);
return Objects.hash(numberOfShards, maxSinglePrimarySize);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import org.elasticsearch.client.Validatable;
import org.elasticsearch.client.ValidationException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

Expand All @@ -45,6 +46,7 @@ public class ResizeRequest extends TimedRequest implements Validatable, ToXConte
private final String targetIndex;
private Settings settings = Settings.EMPTY;
private Set<Alias> aliases = new HashSet<>();
private ByteSizeValue maxSinglePrimarySize;

/**
* Creates a new resize request
Expand Down Expand Up @@ -87,6 +89,20 @@ public Set<Alias> getAliases() {
return Collections.unmodifiableSet(this.aliases);
}

/**
 * Sets the max single primary shard size of the target index.
 *
 * @param maxSinglePrimarySize the maximum size a single primary shard of the
 *                             target index is allowed to have; may be {@code null}
 */
public void setMaxSinglePrimarySize(ByteSizeValue maxSinglePrimarySize) {
this.maxSinglePrimarySize = maxSinglePrimarySize;
}

/**
 * Returns the max single primary shard size of the target index.
 *
 * @return the configured max single primary shard size, or {@code null} if it has not been set
 */
public ByteSizeValue getMaxSinglePrimarySize() {
return maxSinglePrimarySize;
}

@Override
public Optional<ValidationException> validate() {
ValidationException validationException = new ValidationException();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ public void testExplainLifecycle() throws Exception {
Map<String, LifecycleAction> warmActions = new HashMap<>();
warmActions.put(UnfollowAction.NAME, new UnfollowAction());
warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, null, Collections.singletonMap("_name", "node-1")));
warmActions.put(ShrinkAction.NAME, new ShrinkAction(1));
warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null));
warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1000));
lifecyclePhases.put("warm", new Phase("warm", TimeValue.timeValueSeconds(1000), warmActions));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContentType;
Expand Down Expand Up @@ -655,6 +656,9 @@ private void resizeTest(ResizeType resizeType, CheckedFunction<ResizeRequest, Re
if (resizeType == ResizeType.SPLIT) {
resizeRequest.setSettings(Settings.builder().put("index.number_of_shards", 2).build());
}
if (resizeType == ResizeType.SHRINK) {
resizeRequest.setMaxSinglePrimarySize(new ByteSizeValue(randomIntBetween(1, 100)));
}

Request request = function.apply(resizeRequest);
Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ public void testGetLifecyclePolicy() throws IOException, InterruptedException {
PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(myPolicyAsPut);

Map<String, Phase> otherPolicyPhases = new HashMap<>(phases);
Map<String, LifecycleAction> warmActions = Collections.singletonMap(ShrinkAction.NAME, new ShrinkAction(1));
Map<String, LifecycleAction> warmActions = Collections.singletonMap(ShrinkAction.NAME, new ShrinkAction(1, null));
otherPolicyPhases.put("warm", new Phase("warm", new TimeValue(30, TimeUnit.DAYS), warmActions));
otherPolicyAsPut = new LifecyclePolicy("other_policy", otherPolicyPhases);

Expand Down Expand Up @@ -614,7 +614,7 @@ public void testRetryPolicy() throws Exception {
{
Map<String, Phase> phases = new HashMap<>();
Map<String, LifecycleAction> warmActions = new HashMap<>();
warmActions.put(ShrinkAction.NAME, new ShrinkAction(3));
warmActions.put(ShrinkAction.NAME, new ShrinkAction(3, null));
phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions));

LifecyclePolicy policy = new LifecyclePolicy("my_policy",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1609,11 +1609,19 @@ public void testShrinkIndex() throws Exception {
request.setWaitForActiveShards(2); // <1>
request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <2>
// end::shrink-index-request-waitForActiveShards
// tag::shrink-index-request-settings
request.getTargetIndexRequest().settings(Settings.builder()
if (randomBoolean()) {
// tag::shrink-index-request-settings
request.getTargetIndexRequest().settings(Settings.builder()
.put("index.number_of_shards", 2) // <1>
.putNull("index.routing.allocation.require._name")); // <2>
// end::shrink-index-request-settings
// end::shrink-index-request-settings
} else {
request.getTargetIndexRequest().settings(Settings.builder()
.putNull("index.routing.allocation.require._name"));
// tag::shrink-index-request-maxSinglePrimarySize
request.setMaxSinglePrimarySize(new ByteSizeValue(50, ByteSizeUnit.GB)); // <1>
// end::shrink-index-request-maxSinglePrimarySize
}
// tag::shrink-index-request-aliases
request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1>
// end::shrink-index-request-aliases
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
*/
package org.elasticsearch.client.ilm;

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

Expand All @@ -38,7 +39,11 @@ protected ShrinkAction createTestInstance() {
}

static ShrinkAction randomInstance() {
return new ShrinkAction(randomIntBetween(1, 100));
if (randomBoolean()) {
return new ShrinkAction(randomIntBetween(1, 100), null);
} else {
return new ShrinkAction(null, new ByteSizeValue(randomIntBetween(1, 100)));
}
}

@Override
Expand All @@ -47,7 +52,17 @@ protected boolean supportsUnknownFields() {
}

public void testNonPositiveShardNumber() {
Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0)));
Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0), null));
assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0"));
}

/**
 * Verifies the constructor constraints on {@code max_single_primary_size}:
 * it cannot be combined with {@code number_of_shards}, and it must be
 * strictly greater than 0.
 */
public void testMaxSinglePrimarySize() {
    // Setting both options must be rejected.
    ByteSizeValue maxSinglePrimarySize = new ByteSizeValue(10);
    Exception e1 = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(1, 100), maxSinglePrimarySize));
    assertThat(e1.getMessage(), equalTo("Cannot set both [number_of_shards] and [max_single_primary_size]"));

    // A zero-byte size must be rejected. Uses the simple class name for
    // consistency with the rest of this test class (the original used a
    // redundant fully-qualified org.elasticsearch.client.ilm.ShrinkAction
    // reference even though this file is in that package).
    ByteSizeValue zeroSinglePrimarySize = new ByteSizeValue(0);
    Exception e2 = expectThrows(Exception.class, () -> new ShrinkAction(null, zeroSinglePrimarySize));
    assertThat(e2.getMessage(), equalTo("[max_single_primary_size] must be greater than 0"));
}
}
8 changes: 6 additions & 2 deletions docs/java-rest/high-level/indices/shrink_index.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,12 @@ include-tagged::{doc-tests-file}[{api}-request-settings]
<1> The number of shards on the target of the shrink index request
<2> Remove the allocation requirement copied from the source index

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-maxSinglePrimarySize]
--------------------------------------------------
<1> The max single primary shard size of the target index

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-aliases]
Expand All @@ -75,5 +81,3 @@ include-tagged::{doc-tests-file}[{api}-response]
<1> Indicates whether all of the nodes have acknowledged the request
<2> Indicates whether the requisite number of shard copies were started for
each shard in the index before timing out


64 changes: 51 additions & 13 deletions docs/reference/ilm/actions/ilm-shrink.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -4,24 +4,24 @@

Phases allowed: hot, warm.

Sets an index to <<dynamic-index-settings, read-only>>
and shrinks it into a new index with fewer primary shards.
The name of the new index is of the form `shrink-<original-index-name>`.
For example, if the name of the source index is _logs_,
Sets an index to <<dynamic-index-settings, read-only>>
and shrinks it into a new index with fewer primary shards.
The name of the new index is of the form `shrink-<original-index-name>`.
For example, if the name of the source index is _logs_,
the name of the shrunken index is _shrink-logs_.

The shrink action allocates all primary shards of the index to one node so it
The shrink action allocates all primary shards of the index to one node so it
can call the <<indices-shrink-index,Shrink API>> to shrink the index.
After shrinking, it swaps aliases that point to the original index to the new shrunken index.
After shrinking, it swaps aliases that point to the original index to the new shrunken index.

To use the `shrink` action in the `hot` phase, the `rollover` action *must* be present.
If no rollover action is configured, {ilm-init} will reject the policy.

[IMPORTANT]
If the shrink action is used on a <<ccr-put-follow,follower index>>,
If the shrink action is used on a <<ccr-put-follow,follower index>>,
policy execution waits until the leader index rolls over (or is
<<skipping-rollover, otherwise marked complete>>),
then converts the follower index into a regular index with the
<<skipping-rollover, otherwise marked complete>>),
then converts the follower index into a regular index with the
<<ilm-unfollow,unfollow>> action before performing the shrink operation.

If the managed index is part of a <<data-streams, data stream>>,
Expand All @@ -40,14 +40,30 @@ managed indices.
[[ilm-shrink-options]]
==== Shrink options
`number_of_shards`::
(Required, integer)
Number of shards to shrink to.
Must be a factor of the number of shards in the source index.
(Optional, integer)
Number of shards to shrink to.
Must be a factor of the number of shards in the source index. This parameter conflicts with
`max_single_primary_size`; only one of the two may be set.

`max_single_primary_size`::
(Optional, <<byte-units, byte units>>)
The maximum size of a single primary shard in the target index. Used to find the optimum number of
shards for the target index. When this parameter is set, the storage of each shard in the target
index will not exceed it. The number of shards in the target index will still be a factor of the
number of shards in the source index, but if the parameter is less than the size of a single shard
in the source index, the target index will have the same number of shards as the source index.
For example, if this parameter is set to 50gb and the source index has 60 primary shards totaling
100gb, then the target index will have 2 primary shards, each 50gb in size; if the source index has
60 primary shards totaling 1000gb, then the target index will have 20 primary shards; if the source
index has 60 primary shards totaling 4000gb, then the target index will still have 60 primary shards.
This parameter conflicts with `number_of_shards`; only one of the two may be set.


[[ilm-shrink-ex]]
==== Example


[[ilm-shrink-shards-ex]]
===== Set the number of shards of the new shrunken index explicitly

[source,console]
--------------------------------------------------
PUT _ilm/policy/my_policy
Expand All @@ -65,3 +81,25 @@ PUT _ilm/policy/my_policy
}
}
--------------------------------------------------

[[ilm-shrink-size-ex]]
===== Calculate the number of shards of the new shrunken index based on the storage of the source index and the `max_single_primary_size` parameter

[source,console]
--------------------------------------------------
PUT _ilm/policy/my_policy
{
"policy": {
"phases": {
"warm": {
"actions": {
"shrink" : {
"max_single_primary_size": "50gb"
}
}
}
}
}
}
--------------------------------------------------
14 changes: 13 additions & 1 deletion docs/reference/indices/shrink-index.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ PUT /my_source_index/_settings
// TEST[s/^/PUT my_source_index\n{"settings":{"index.number_of_shards":2}}\n/]

<1> Removes replica shards for the index.
<2> Relocates the index's shards to the `shrink_node_name` node.
<2> Relocates the index's shards to the `shrink_node_name` node.
See <<shard-allocation-filtering>>.
<3> Prevents write operations to this index. Metadata changes, such as deleting
the index, are still allowed.
Expand Down Expand Up @@ -230,3 +230,15 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=target-index-aliases]

include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=target-index-settings]

`max_single_primary_size`::
(Optional, <<byte-units, byte units>>)
The maximum size of a single primary shard in the target index. Used to find the optimum number of
shards for the target index. When this parameter is set, the storage of each shard in the target
index will not exceed it. The number of shards in the target index will still be a factor of the
number of shards in the source index, but if the parameter is less than the size of a single shard
in the source index, the target index will have the same number of shards as the source index.
For example, if this parameter is set to 50gb and the source index has 60 primary shards totaling
100gb, then the target index will have 2 primary shards, each 50gb in size; if the source index has
60 primary shards totaling 1000gb, then the target index will have 20 primary shards; if the source
index has 60 primary shards totaling 4000gb, then the target index will still have 60 primary shards.
This parameter conflicts with `number_of_shards` in the `settings`; only one of the two may be set.
Loading