44 | 44 | import org.elasticsearch.common.UUIDs;
45 | 45 | import org.elasticsearch.common.collect.ImmutableOpenMap;
46 | 46 | import org.elasticsearch.common.settings.ClusterSettings;
| 47 | +import org.elasticsearch.common.settings.Setting;
47 | 48 | import org.elasticsearch.common.settings.Settings;
48 | 49 | import org.elasticsearch.index.Index;
49 | 50 | import org.elasticsearch.index.shard.ShardId;

@@ -925,141 +926,15 @@ Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLU
925 | 926 |         assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node2"));
926 | 927 |     }
927 | 928 |
928 | | -    public void testForSingleDataNode() {
929 | | -        // remove test in 9.0
930 | | -        Settings diskSettings = Settings.builder()
931 | | -            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
932 | | -            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
933 | | -            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build();
934 | | -
935 | | -        ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
936 | | -        usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used
937 | | -        usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 20)); // 80% used
938 | | -        usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 100)); // 0% used
939 | | -        ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
940 | | -
941 | | -        // We have an index with 2 primary shards, each taking 40 bytes. Each node has 100 bytes available.
942 | | -        ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
943 | | -        shardSizes.put("[test][0][p]", 40L);
944 | | -        shardSizes.put("[test][1][p]", 40L);
945 | | -        final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes.build());
946 | | -
947 | | -        DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings);
948 | | -        Metadata metadata = Metadata.builder()
949 | | -            .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
950 | | -            .build();
951 | | -
952 | | -        RoutingTable initialRoutingTable = RoutingTable.builder()
953 | | -            .addAsNew(metadata.index("test"))
954 | | -            .build();
955 | | -
956 | | -        logger.info("--> adding one master node, one data node");
957 | | -        DiscoveryNode discoveryNode1 = new DiscoveryNode("", "node1", buildNewFakeTransportAddress(), emptyMap(),
958 | | -            singleton(DiscoveryNodeRole.MASTER_ROLE), Version.CURRENT);
959 | | -        DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", buildNewFakeTransportAddress(), emptyMap(),
960 | | -            singleton(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT);
961 | | -
962 | | -        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
963 | | -        ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
964 | | -            .metadata(metadata)
965 | | -            .routingTable(initialRoutingTable)
966 | | -            .nodes(discoveryNodes)
967 | | -            .build();
968 | | -
969 | | -        // The two shards consume 80% of the disk space on the data node, but since it is the only data node the shards should remain.
970 | | -        ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, true, ShardRoutingState.STARTED);
971 | | -        ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", null, true, ShardRoutingState.STARTED);
972 | | -        RoutingNode firstRoutingNode = new RoutingNode("node2", discoveryNode2, firstRouting, secondRouting);
973 | | -
974 | | -        RoutingTable.Builder builder = RoutingTable.builder().add(
975 | | -            IndexRoutingTable.builder(firstRouting.index())
976 | | -                .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId())
977 | | -                    .addShard(firstRouting)
978 | | -                    .build()
979 | | -                )
980 | | -                .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId())
981 | | -                    .addShard(secondRouting)
982 | | -                    .build()
983 | | -                )
984 | | -        );
985 | | -        ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
986 | | -        RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo,
987 | | -            null, System.nanoTime());
988 | | -        routingAllocation.debugDecision(true);
989 | | -        Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
990 | | -
991 | | -        // Both shards should start happily
992 | | -        assertThat(decision.type(), equalTo(Decision.Type.YES));
993 | | -        assertThat(decision.getExplanation(), containsString("there is only a single data node present"));
994 | | -        ClusterInfoService cis = () -> {
995 | | -            logger.info("--> calling fake getClusterInfo");
996 | | -            return clusterInfo;
997 | | -        };
998 | | -
999 | | -        AllocationDeciders deciders = new AllocationDeciders(new HashSet<>(Arrays.asList(
1000 | | -            new SameShardAllocationDecider(
1001 | | -                Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
1002 | | -            ),
1003 | | -            diskThresholdDecider
1004 | | -        )));
1005 | | -
1006 | | -        AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(),
1007 | | -            new BalancedShardsAllocator(Settings.EMPTY), cis, EmptySnapshotsInfoService.INSTANCE);
1008 | | -        ClusterState result = strategy.reroute(clusterState, "reroute");
1009 | | -
1010 | | -        assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
1011 | | -        assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
1012 | | -        assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
1013 | | -        assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(STARTED));
1014 | | -        assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node2"));
1015 | | -        assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), nullValue());
1016 | | -
1017 | | -        // Add another data node; now one shard should relocate.
1018 | | -        logger.info("--> adding node3");
1019 | | -        DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", buildNewFakeTransportAddress(), emptyMap(),
1020 | | -            singleton(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT);
1021 | | -        ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
1022 | | -            .add(discoveryNode3)).build();
1023 | | -
1024 | | -        firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, true, ShardRoutingState.STARTED);
1025 | | -        secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", true, ShardRoutingState.RELOCATING);
1026 | | -        firstRoutingNode = new RoutingNode("node2", discoveryNode2, firstRouting, secondRouting);
1027 | | -        builder = RoutingTable.builder().add(
1028 | | -            IndexRoutingTable.builder(firstRouting.index())
1029 | | -                .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId())
1030 | | -                    .addShard(firstRouting)
1031 | | -                    .build()
1032 | | -                )
1033 | | -                .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId())
1034 | | -                    .addShard(secondRouting)
1035 | | -                    .build()
1036 | | -                )
1037 | | -        );
1038 | | -
1039 | | -        clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();
1040 | | -        routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, null,
1041 | | -            System.nanoTime());
1042 | | -        routingAllocation.debugDecision(true);
1043 | | -        decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
1044 | | -        assertThat(decision.type(), equalTo(Decision.Type.YES));
1045 | | -        assertThat(((Decision.Single) decision).getExplanation(), containsString(
1046 | | -            "there is enough disk on this node for the shard to remain, free: [60b]"));
1047 | | -
1048 | | -        result = strategy.reroute(clusterState, "reroute");
1049 | | -        assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
1050 | | -        assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
1051 | | -        assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
1052 | | -        assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
1053 | | -        assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node2"));
1054 | | -        assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node3"));
1055 | | -    }
1056 | | -
1057 | 929 |     public void testWatermarksEnabledForSingleDataNode() {
1058 | | -        Settings diskSettings = Settings.builder()
1059 | | -            .put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), true)
| 930 | +        Settings.Builder builder = Settings.builder()
1060 | 931 |             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
1061 | 932 |             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
1062 | | -            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build();
| 933 | +            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%");
| 934 | +        if (randomBoolean()) {
| 935 | +            builder.put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), true);
| 936 | +        }
| 937 | +        Settings diskSettings = builder.build();
1063 | 938 |
1064 | 939 |         ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
1065 | 940 |         usagesBuilder.put("data", new DiskUsage("data", "data", "/dev/null", 100, 20)); // 80% used

@@ -1131,6 +1006,25 @@ Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLU
1131 | 1006 |             "the shard cannot remain on this node because it is above the high watermark cluster setting" +
1132 | 1007 |             " [cluster.routing.allocation.disk.watermark.high=70%] and there is less than the required [30.0%] free disk on node," +
1133 | 1008 |             " actual free: [20.0%]"));
| 1009 | +
| 1010 | +        if (DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.exists(diskSettings)) {
| 1011 | +            assertSettingDeprecationsAndWarnings(new Setting<?>[] { DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE });
| 1012 | +        }
| 1013 | +    }
| 1014 | +
| 1015 | +    public void testSingleDataNodeDeprecationWarning() {
| 1016 | +        Settings settings = Settings.builder()
| 1017 | +            .put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), false)
| 1018 | +            .build();
| 1019 | +
| 1020 | +        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
| 1021 | +            () -> new DiskThresholdDecider(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
| 1022 | +
| 1023 | +        assertThat(e.getCause().getMessage(),
| 1024 | +            equalTo("setting [cluster.routing.allocation.disk.watermark.enable_for_single_data_node=false] is not allowed," +
| 1025 | +                " only true is valid"));
| 1026 | +
| 1027 | +        assertSettingDeprecationsAndWarnings(new Setting<?>[] { DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE });
1134 | 1028 |     }
1135 | 1029 |
1136 | 1030 |     public void testDiskThresholdWithSnapshotShardSizes() {
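For context on what the deleted `testForSingleDataNode` covered: the diff removes the legacy behaviour in which disk watermarks were skipped outright on clusters with a single data node unless `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` was set to `true`. A minimal sketch of that short-circuit, under the stated assumption that this simplified helper only illustrates the shape of the real logic in `DiskThresholdDecider#canRemain` (which takes a `RoutingAllocation`, not plain values):

```java
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

// Hypothetical simplification of the legacy bypass the removed test exercised;
// not the actual decider code.
static Decision canRemainLegacySketch(boolean enableForSingleDataNode, int dataNodeCount,
                                      double usedDiskPercent, double highWatermarkPercent) {
    if (enableForSingleDataNode == false && dataNodeCount <= 1) {
        // Watermarks ignored: "there is only a single data node present",
        // the explanation string the removed test asserted on.
        return Decision.YES;
    }
    return usedDiskPercent > highWatermarkPercent ? Decision.NO : Decision.YES;
}
```

This is why the removed test could keep two shards at 80% disk usage on the lone data node; once a second data node joined, normal watermark accounting resumed.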
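The new `testSingleDataNodeDeprecationWarning` pins down two behaviours: setting the key to `false` fails validation, with the validator's message surfaced as the exception's cause, and any explicit use of the key emits a deprecation warning. A sketch of a `Setting` declaration that would behave this way, assuming the `org.elasticsearch.common.settings.Setting` constructor that takes a parser function; the actual field in `DiskThresholdDecider` may be declared differently:

```java
import org.elasticsearch.common.settings.Setting;

// Hypothetical sketch, not the actual DiskThresholdDecider source: a deprecated
// boolean setting whose parser rejects every value except "true".
public static final Setting<Boolean> ENABLE_FOR_SINGLE_DATA_NODE = new Setting<>(
    "cluster.routing.allocation.disk.watermark.enable_for_single_data_node",
    Boolean.TRUE.toString(),
    value -> {
        if (Boolean.parseBoolean(value) == false) {
            // Setting#get wraps parser failures in an IllegalArgumentException,
            // which is why the test reads e.getCause().getMessage().
            throw new IllegalArgumentException(
                "setting [cluster.routing.allocation.disk.watermark.enable_for_single_data_node=false]"
                    + " is not allowed, only true is valid");
        }
        return true;
    },
    Setting.Property.NodeScope,
    Setting.Property.Deprecated);
```

Because `Property.Deprecated` only produces a warning when the key is actually present in the settings, the updated watermark test guards its `assertSettingDeprecationsAndWarnings` call with `ENABLE_FOR_SINGLE_DATA_NODE.exists(diskSettings)`, matching the `randomBoolean()` branch that only sometimes sets the flag.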