[2019-05-28T05:40:07,487][INFO ][o.e.x.c.IndexFollowingIT ] [testSyncMappings] before test [2019-05-28T05:40:07,536][INFO ][o.e.t.InternalTestCluster] [testSyncMappings] Setup InternalTestCluster [leader_cluster] with seed [8401A95D42D1D92A] using [0] dedicated masters, [2] (data) nodes and [0] coord only nodes (min_master_nodes are [auto-managed]) [2019-05-28T05:40:07,671][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] using [4] data paths, mounts [[/ (rootfs)]], net usable_space [269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:07,672][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:07,754][INFO ][o.e.n.Node ] [testSyncMappings] node name [leader0], node ID [9HWb90BFR1eAyo6kqv0ZYA], cluster name [leader_cluster] [2019-05-28T05:40:07,754][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:07,754][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:07,755][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:07,755][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:07,802][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:07,802][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:07,802][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:07,802][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:07,803][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:07,803][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:07,803][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:07,803][INFO ][o.e.p.PluginsService ] 
[testSyncMappings] loaded plugin [org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:08,397][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:08,437][INFO ][o.e.n.Node ] [testSyncMappings] initialized [2019-05-28T05:40:08,573][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] using [4] data paths, mounts [[/ (rootfs)]], net usable_space [269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:08,574][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:08,621][INFO ][o.e.n.Node ] [testSyncMappings] node name [leader1], node ID [rUlMFBY9QXukTRws0zFidg], cluster name [leader_cluster] [2019-05-28T05:40:08,622][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:08,623][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:08,623][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:08,623][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:08,629][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:08,646][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:08,647][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:08,647][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:08,647][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:08,647][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:08,647][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:08,647][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin 
[org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:08,933][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:08,987][INFO ][o.e.n.Node ] [testSyncMappings] initialized [2019-05-28T05:40:08,994][INFO ][o.e.n.Node ] [[test_leader_cluster[T#1]]] starting ... [2019-05-28T05:40:09,011][INFO ][o.e.n.Node ] [[test_leader_cluster[T#2]]] starting ... [2019-05-28T05:40:09,036][INFO ][o.e.t.TransportService ] [[test_leader_cluster[T#1]]] publish_address {127.0.0.1:43808}, bound_addresses {[::1]:46337}, {127.0.0.1:43808} [2019-05-28T05:40:09,050][INFO ][o.e.t.TransportService ] [[test_leader_cluster[T#2]]] publish_address {127.0.0.1:39962}, bound_addresses {[::1]:36943}, {127.0.0.1:39962} [2019-05-28T05:40:09,095][INFO ][o.e.c.c.Coordinator ] [leader0] setting initial configuration to VotingConfiguration{9HWb90BFR1eAyo6kqv0ZYA,rUlMFBY9QXukTRws0zFidg} [2019-05-28T05:40:09,402][INFO ][o.e.c.s.MasterService ] [leader0] elected-as-master ([2] nodes joined)[{leader1}{rUlMFBY9QXukTRws0zFidg}{32Lq1xRRQDK2ZlI7Jq6fDg}{127.0.0.1}{127.0.0.1:39962}{xpack.installed=true} elect leader, {leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_], term: 1, version: 1, reason: master node changed {previous [], current [{leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true}]}, added {{leader1}{rUlMFBY9QXukTRws0zFidg}{32Lq1xRRQDK2ZlI7Jq6fDg}{127.0.0.1}{127.0.0.1:39962}{xpack.installed=true},} [2019-05-28T05:40:09,561][INFO ][o.e.c.c.CoordinationState] [leader0] cluster UUID set to [SXSaPUu7S_Szjw-z7GJhaQ] [2019-05-28T05:40:09,570][INFO ][o.e.c.c.CoordinationState] [leader1] cluster UUID set to [SXSaPUu7S_Szjw-z7GJhaQ] [2019-05-28T05:40:09,686][INFO ][o.e.c.s.ClusterApplierService] [leader1] master node changed {previous [], current [{leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true}]}, added {{leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true},}, term: 1, version: 1, reason: ApplyCommitRequest{term=1, version=1, sourceNode={leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true}} [2019-05-28T05:40:09,698][INFO ][o.e.n.Node ] [[test_leader_cluster[T#2]]] started [2019-05-28T05:40:09,700][INFO ][o.e.c.s.ClusterApplierService] [leader0] master node changed {previous [], current [{leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true}]}, added {{leader1}{rUlMFBY9QXukTRws0zFidg}{32Lq1xRRQDK2ZlI7Jq6fDg}{127.0.0.1}{127.0.0.1:39962}{xpack.installed=true},}, term: 1, version: 1, reason: Publication{term=1, version=1} [2019-05-28T05:40:09,714][INFO ][o.e.n.Node ] [[test_leader_cluster[T#1]]] started [2019-05-28T05:40:10,152][INFO ][o.e.g.GatewayService ] [leader0] recovered [0] indices into cluster_state [2019-05-28T05:40:10,341][INFO ][o.e.l.LicenseService ] [leader1] license [5f083602-ad16-4ce9-b2ef-f28662d11334] mode [trial] - valid [2019-05-28T05:40:10,397][INFO ][o.e.l.LicenseService ] [leader0] license [5f083602-ad16-4ce9-b2ef-f28662d11334] mode [trial] - valid [2019-05-28T05:40:10,758][INFO ][o.e.t.InternalTestCluster] [testSyncMappings] Setup InternalTestCluster [follower_cluster] with seed [7D81EA1452344D27] using [3] dedicated masters, [2] (data) nodes and 
[0] coord only nodes (min_master_nodes are [auto-managed]) [2019-05-28T05:40:10,816][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] using [1] data paths, mounts [[/ (rootfs)]], net usable_space [269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:10,816][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:10,826][INFO ][o.e.n.Node ] [testSyncMappings] node name [followerm0], node ID [UYz5fl8mR7eWQMfDxioQTg], cluster name [follower_cluster] [2019-05-28T05:40:10,826][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:10,827][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:10,828][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:10,828][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:10,843][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:10,843][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:10,843][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:10,844][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:10,844][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:10,844][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:10,844][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:10,844][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:10,916][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:10,971][INFO ][o.e.n.Node ] [testSyncMappings] 
initialized [2019-05-28T05:40:11,000][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] using [1] data paths, mounts [[/ (rootfs)]], net usable_space [269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:11,000][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:11,011][INFO ][o.e.n.Node ] [testSyncMappings] node name [followerm1], node ID [GBcXYgu6TaaDLGU6scqZsQ], cluster name [follower_cluster] [2019-05-28T05:40:11,012][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:11,013][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:11,013][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:11,013][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:11,018][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:11,019][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:11,020][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:11,020][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:11,020][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:11,020][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:11,020][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:11,020][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:11,081][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:11,151][INFO ][o.e.n.Node ] [testSyncMappings] initialized [2019-05-28T05:40:11,176][INFO 
][o.e.e.NodeEnvironment ] [testSyncMappings] using [1] data paths, mounts [[/ (rootfs)]], net usable_space [269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:11,176][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:11,184][INFO ][o.e.n.Node ] [testSyncMappings] node name [followerm2], node ID [MS3IO8EbQIWl4ZBtlRtfAw], cluster name [follower_cluster] [2019-05-28T05:40:11,184][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:11,184][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:11,184][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:11,185][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:11,189][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:11,264][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:11,294][INFO ][o.e.n.Node ] [testSyncMappings] initialized [2019-05-28T05:40:11,321][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] using [1] 
data paths, mounts [[/ (rootfs)]], net usable_space [269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:11,321][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:11,328][INFO ][o.e.n.Node ] [testSyncMappings] node name [followerd3], node ID [imikiXyHQzWPRqBx1bSawA], cluster name [follower_cluster] [2019-05-28T05:40:11,329][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:11,329][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:11,329][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:11,329][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:11,333][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:11,334][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:11,387][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:11,418][INFO ][o.e.n.Node ] [testSyncMappings] initialized [2019-05-28T05:40:11,447][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] using [1] data paths, mounts [[/ (rootfs)]], net usable_space 
[269.5gb], net total_space [349.9gb], types [rootfs] [2019-05-28T05:40:11,447][INFO ][o.e.e.NodeEnvironment ] [testSyncMappings] heap size [512mb], compressed ordinary object pointers [true] [2019-05-28T05:40:11,454][INFO ][o.e.n.Node ] [testSyncMappings] node name [followerd4], node ID [XK3Qc8oGSAqdSsvD2_yMPw], cluster name [follower_cluster] [2019-05-28T05:40:11,454][INFO ][o.e.n.Node ] [testSyncMappings] version[7.3.0-SNAPSHOT], pid[269107], build[unknown/unknown/2077f9f/2019-05-28T04:12:42.128520Z], OS[Linux/3.10.0-957.12.2.el7.x86_64/amd64], JVM[Azul Systems, Inc./OpenJDK 64-Bit Server VM/12.0.1/12.0.1+12] [2019-05-28T05:40:11,454][INFO ][o.e.n.Node ] [testSyncMappings] JVM home [/var/lib/jenkins/.java/zulu-12.0.1-linux] [2019-05-28T05:40:11,454][INFO ][o.e.n.Node ] [testSyncMappings] JVM arguments [-Dfile.encoding=UTF8, -Dcompiler.java=12, -Des.scripting.update.ctx_in_params=false, -Des.set.netty.runtime.available.processors=false, -Dgradle.dist.lib=/var/lib/jenkins/.gradle/wrapper/dists/gradle-5.4.1-all/3221gyojl5jsh0helicew7rwx/gradle-5.4.1/lib, -Dgradle.user.home=/var/lib/jenkins/.gradle, -Dgradle.worker.jar=/var/lib/jenkins/.gradle/caches/5.4.1/workerMain/gradle-worker.jar, -Djava.awt.headless=true, -Djna.nosys=true, -Dorg.gradle.native=false, -Druntime.java=12, -Dtests.artifact=ccr, -Dtests.gradle=true, -Dtests.logger.level=WARN, -Dtests.security.manager=true, -Dtests.seed=B5A055715D18E549, -Dtests.task=:x-pack:plugin:ccr:internalClusterTest, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=/var/lib/jenkins/workspace/elastic+elasticsearch+7.x+matrix-java-periodic/ES_BUILD_JAVA/openjdk12/ES_RUNTIME_JAVA/zulu12/nodes/immutable&&linux&&docker/x-pack/plugin/ccr/build/heapdump, --illegal-access=warn, -esa, -Xms512m, -Xmx512m, -Dfile.encoding=UTF-8, -Djava.io.tmpdir=./temp, -Duser.country=US, -Duser.language=en, -Duser.variant, -ea] [2019-05-28T05:40:11,455][WARN ][o.e.n.Node ] [testSyncMappings] version [7.3.0-SNAPSHOT] is a pre-release version of Elasticsearch and is not suitable for production [2019-05-28T05:40:11,459][INFO ][o.e.p.PluginsService ] [testSyncMappings] no modules loaded [2019-05-28T05:40:11,460][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.analysis.common.CommonAnalysisPlugin] [2019-05-28T05:40:11,460][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.ESIntegTestCase$TestSeedPlugin] [2019-05-28T05:40:11,460][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.InternalSettingsPlugin] [2019-05-28T05:40:11,460][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.MockHttpTransport$TestPlugin] [2019-05-28T05:40:11,460][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.test.transport.MockTransportService$TestPlugin] [2019-05-28T05:40:11,461][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.transport.nio.MockNioTransportPlugin] [2019-05-28T05:40:11,461][INFO ][o.e.p.PluginsService ] [testSyncMappings] loaded plugin [org.elasticsearch.xpack.ccr.LocalStateCcr] [2019-05-28T05:40:11,520][INFO ][o.e.d.DiscoveryModule ] [testSyncMappings] using discovery type [zen] and seed hosts providers [settings, file] [2019-05-28T05:40:11,544][INFO ][o.e.n.Node ] [testSyncMappings] initialized [2019-05-28T05:40:11,553][INFO ][o.e.n.Node ] [[test_follower_cluster[T#1]]] starting ... [2019-05-28T05:40:11,555][INFO ][o.e.n.Node ] [[test_follower_cluster[T#2]]] starting ... 
[2019-05-28T05:40:11,556][INFO ][o.e.n.Node ] [[test_follower_cluster[T#3]]] starting ... [2019-05-28T05:40:11,558][INFO ][o.e.n.Node ] [[test_follower_cluster[T#4]]] starting ... [2019-05-28T05:40:11,558][INFO ][o.e.n.Node ] [[test_follower_cluster[T#5]]] starting ... [2019-05-28T05:40:11,574][INFO ][o.e.t.TransportService ] [[test_follower_cluster[T#3]]] publish_address {127.0.0.1:44497}, bound_addresses {[::1]:43309}, {127.0.0.1:44497} [2019-05-28T05:40:11,589][INFO ][o.e.t.TransportService ] [[test_follower_cluster[T#2]]] publish_address {127.0.0.1:35665}, bound_addresses {[::1]:45111}, {127.0.0.1:35665} [2019-05-28T05:40:11,593][INFO ][o.e.t.TransportService ] [[test_follower_cluster[T#1]]] publish_address {127.0.0.1:45522}, bound_addresses {[::1]:43022}, {127.0.0.1:45522} [2019-05-28T05:40:11,621][INFO ][o.e.t.TransportService ] [[test_follower_cluster[T#4]]] publish_address {127.0.0.1:37483}, bound_addresses {[::1]:40683}, {127.0.0.1:37483} [2019-05-28T05:40:11,640][INFO ][o.e.t.TransportService ] [[test_follower_cluster[T#5]]] publish_address {127.0.0.1:34928}, bound_addresses {[::1]:42630}, {127.0.0.1:34928} [2019-05-28T05:40:11,720][INFO ][o.e.c.c.Coordinator ] [followerm0] setting initial configuration to VotingConfiguration{UYz5fl8mR7eWQMfDxioQTg,{bootstrap-placeholder}-followerm2,GBcXYgu6TaaDLGU6scqZsQ} [2019-05-28T05:40:11,750][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [201 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleClose(TestEventHandler.java:202) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.closePendingChannels(NioSelector.java:437) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:152) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:11,818][WARN ][o.e.t.n.MockNioTransport ] [followerm2] Slow execution on network thread [214 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:105) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:11,949][INFO ][o.e.c.s.MasterService ] [followerm0] elected-as-master ([2] nodes joined)[{followerm1}{GBcXYgu6TaaDLGU6scqZsQ}{GoKNx8JdTN2QLOYTsDztbg}{127.0.0.1}{127.0.0.1:35665}{xpack.installed=true} elect leader, {followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_], term: 1, version: 1, reason: master node changed {previous [], current [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}]}, added {{followerm1}{GBcXYgu6TaaDLGU6scqZsQ}{GoKNx8JdTN2QLOYTsDztbg}{127.0.0.1}{127.0.0.1:35665}{xpack.installed=true},} [2019-05-28T05:40:11,975][INFO ][o.e.c.c.CoordinationState] [followerm0] cluster UUID set to [D0-tEwcAT-W1Q9UCAAoQ9g] [2019-05-28T05:40:11,975][INFO ][o.e.c.c.CoordinationState] [followerm1] cluster UUID set to [D0-tEwcAT-W1Q9UCAAoQ9g] [2019-05-28T05:40:12,057][INFO ][o.e.c.s.ClusterApplierService] [followerm1] master node changed {previous [], current [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}]}, added {{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true},}, term: 1, version: 1, reason: ApplyCommitRequest{term=1, version=1, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,062][INFO ][o.e.n.Node ] [[test_follower_cluster[T#2]]] started [2019-05-28T05:40:12,066][INFO ][o.e.c.s.ClusterApplierService] [followerm0] master node changed {previous [], current [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}]}, added {{followerm1}{GBcXYgu6TaaDLGU6scqZsQ}{GoKNx8JdTN2QLOYTsDztbg}{127.0.0.1}{127.0.0.1:35665}{xpack.installed=true},}, term: 1, version: 1, reason: Publication{term=1, version=1} [2019-05-28T05:40:12,070][INFO ][o.e.n.Node ] [[test_follower_cluster[T#1]]] started [2019-05-28T05:40:12,085][INFO ][o.e.c.s.MasterService ] [followerm0] 
node-join[{followerm2}{MS3IO8EbQIWl4ZBtlRtfAw}{RPrLwBemRn-MYDStU7iJug}{127.0.0.1}{127.0.0.1:44497}{xpack.installed=true} join existing leader], term: 1, version: 2, reason: added {{followerm2}{MS3IO8EbQIWl4ZBtlRtfAw}{RPrLwBemRn-MYDStU7iJug}{127.0.0.1}{127.0.0.1:44497}{xpack.installed=true},} [2019-05-28T05:40:12,101][INFO ][o.e.c.s.ClusterApplierService] [followerm1] added {{followerm2}{MS3IO8EbQIWl4ZBtlRtfAw}{RPrLwBemRn-MYDStU7iJug}{127.0.0.1}{127.0.0.1:44497}{xpack.installed=true},}, term: 1, version: 2, reason: ApplyCommitRequest{term=1, version=2, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,113][INFO ][o.e.c.s.ClusterApplierService] [followerm2] master node changed {previous [], current [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}]}, added {{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true},{followerm1}{GBcXYgu6TaaDLGU6scqZsQ}{GoKNx8JdTN2QLOYTsDztbg}{127.0.0.1}{127.0.0.1:35665}{xpack.installed=true},}, term: 1, version: 2, reason: ApplyCommitRequest{term=1, version=2, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,120][INFO ][o.e.n.Node ] [[test_follower_cluster[T#3]]] started [2019-05-28T05:40:12,124][INFO ][o.e.c.s.ClusterApplierService] [followerm0] added {{followerm2}{MS3IO8EbQIWl4ZBtlRtfAw}{RPrLwBemRn-MYDStU7iJug}{127.0.0.1}{127.0.0.1:44497}{xpack.installed=true},}, term: 1, version: 2, reason: Publication{term=1, version=2} [2019-05-28T05:40:12,233][INFO ][o.e.g.GatewayService ] [followerm0] recovered [0] indices into cluster_state [2019-05-28T05:40:12,317][INFO ][o.e.l.LicenseService ] [followerm2] license [b455810e-47ec-4468-bdbe-a0a03c9e2e88] mode [trial] - valid [2019-05-28T05:40:12,318][INFO ][o.e.l.LicenseService ] [followerm1] license [b455810e-47ec-4468-bdbe-a0a03c9e2e88] mode [trial] - valid [2019-05-28T05:40:12,354][INFO ][o.e.l.LicenseService ] [followerm0] license [b455810e-47ec-4468-bdbe-a0a03c9e2e88] mode [trial] - valid [2019-05-28T05:40:12,735][INFO ][o.e.c.s.MasterService ] [followerm0] node-join[{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true} join existing leader], term: 1, version: 6, reason: added {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},} [2019-05-28T05:40:12,748][INFO ][o.e.c.s.ClusterApplierService] [followerm1] added {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 6, reason: ApplyCommitRequest{term=1, version=6, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,749][INFO ][o.e.c.s.ClusterApplierService] [followerm2] added {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 6, reason: ApplyCommitRequest{term=1, version=6, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,749][INFO ][o.e.c.s.ClusterApplierService] [followerd3] master node changed {previous [], current 
[{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}]}, added {{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true},{followerm2}{MS3IO8EbQIWl4ZBtlRtfAw}{RPrLwBemRn-MYDStU7iJug}{127.0.0.1}{127.0.0.1:44497}{xpack.installed=true},{followerm1}{GBcXYgu6TaaDLGU6scqZsQ}{GoKNx8JdTN2QLOYTsDztbg}{127.0.0.1}{127.0.0.1:35665}{xpack.installed=true},}, term: 1, version: 6, reason: ApplyCommitRequest{term=1, version=6, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,832][INFO ][o.e.l.LicenseService ] [followerd3] license [b455810e-47ec-4468-bdbe-a0a03c9e2e88] mode [trial] - valid [2019-05-28T05:40:12,833][INFO ][o.e.n.Node ] [[test_follower_cluster[T#4]]] started [2019-05-28T05:40:12,834][INFO ][o.e.c.s.ClusterApplierService] [followerm0] added {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 6, reason: Publication{term=1, version=6} [2019-05-28T05:40:12,838][INFO ][o.e.c.s.MasterService ] [followerm0] node-join[{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true} join existing leader], term: 1, version: 7, reason: added {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},} [2019-05-28T05:40:12,850][INFO ][o.e.c.s.ClusterApplierService] [followerm1] added {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 7, reason: ApplyCommitRequest{term=1, version=7, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,850][INFO ][o.e.c.s.ClusterApplierService] [followerd3] added {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 7, reason: ApplyCommitRequest{term=1, version=7, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,850][INFO ][o.e.c.s.ClusterApplierService] [followerd4] master node changed {previous [], current [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}]}, added {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true},{followerm2}{MS3IO8EbQIWl4ZBtlRtfAw}{RPrLwBemRn-MYDStU7iJug}{127.0.0.1}{127.0.0.1:44497}{xpack.installed=true},{followerm1}{GBcXYgu6TaaDLGU6scqZsQ}{GoKNx8JdTN2QLOYTsDztbg}{127.0.0.1}{127.0.0.1:35665}{xpack.installed=true},}, term: 1, version: 7, reason: ApplyCommitRequest{term=1, version=7, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:40:12,851][INFO ][o.e.c.s.ClusterApplierService] [followerm2] added {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 7, reason: ApplyCommitRequest{term=1, version=7, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} 
[2019-05-28T05:40:12,922][INFO ][o.e.l.LicenseService ] [followerd4] license [b455810e-47ec-4468-bdbe-a0a03c9e2e88] mode [trial] - valid [2019-05-28T05:40:12,923][INFO ][o.e.n.Node ] [[test_follower_cluster[T#5]]] started [2019-05-28T05:40:12,928][INFO ][o.e.c.s.ClusterApplierService] [followerm0] added {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 7, reason: Publication{term=1, version=7} [2019-05-28T05:40:13,125][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [2]/[1], mappings [doc] [2019-05-28T05:40:14,373][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [201 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:14,796][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:40:15,388][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:15,494][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:40:15,620][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][1]] ...]). 
[2019-05-28T05:40:15,624][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:15,743][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:40:15,788][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:16,345][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [202 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:16,377][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:16,376][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:16,907][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/pfQ1-qBYQui-UoUUABEP_A] update_mapping [doc] [2019-05-28T05:40:16,961][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [203 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:17,091][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/V5GU3GGBS5G8uenqgMrfqw] update_mapping [doc] [2019-05-28T05:40:17,384][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:17,785][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [203 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleWrite(TestEventHandler.java:154) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleWrite(NioSelector.java:389) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.writeToChannel(NioSelector.java:345) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleQueuedWrites(NioSelector.java:448) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:262) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:17,786][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [208 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:18,191][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [221 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:19,381][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped
[2019-05-28T05:40:19,445][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] shard follow task has been stopped
[2019-05-28T05:40:19,702][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/pfQ1-qBYQui-UoUUABEP_A] deleting index
[2019-05-28T05:40:20,355][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/V5GU3GGBS5G8uenqgMrfqw] deleting index
[2019-05-28T05:40:20,545][INFO ][o.e.x.c.IndexFollowingIT ] [testSyncMappings] after test
[2019-05-28T05:40:20,633][INFO ][o.e.x.c.IndexFollowingIT ] [testDeleteLeaderIndex] before test
[2019-05-28T05:40:20,640][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings []
[2019-05-28T05:40:20,931][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]).
[2019-05-28T05:40:21,605][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]).
[2019-05-28T05:40:21,885][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0]
[2019-05-28T05:40:21,901][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:21,924][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/YLcnAElSTlGGv0i8Hpgomg] create_mapping [doc]
[2019-05-28T05:40:22,014][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [212 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:22,113][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/N4E8sr5URVigYCJVCE4qug] create_mapping [doc]
[2019-05-28T05:40:22,511][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/YLcnAElSTlGGv0i8Hpgomg] deleting index
[2019-05-28T05:40:22,681][WARN ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] shard follow task encounter non-retryable error
org.elasticsearch.transport.RemoteTransportException: [leader1][127.0.0.1:39962][indices:data/read/xpack/ccr/shard_changes]
Caused by: org.elasticsearch.index.IndexNotFoundException: no such index [index1]
at org.elasticsearch.cluster.routing.RoutingTable.shardRoutingTable(RoutingTable.java:136) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
at org.elasticsearch.xpack.ccr.action.ShardChangesAction$TransportAction.shards(ShardChangesAction.java:467) ~[main/:?]
at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$AsyncSingleAction.<init>(TransportSingleShardAction.java:176) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$AsyncSingleAction.<init>(TransportSingleShardAction.java:141) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.single.shard.TransportSingleShardAction.doExecute(TransportSingleShardAction.java:104) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.single.shard.TransportSingleShardAction.doExecute(TransportSingleShardAction.java:63) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$TransportHandler.messageReceived(TransportSingleShardAction.java:283) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$TransportHandler.messageReceived(TransportSingleShardAction.java:278) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:63) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler$RequestHandler.doRun(InboundHandler.java:267) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:192) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.handleRequest(InboundHandler.java:188) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:105) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:660) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.consumeNetworkReads(TcpTransport.java:684) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockTcpReadWriteHandler.consumeReads(MockNioTransport.java:255) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.handleReadBytes(SocketChannelContext.java:215) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.BytesChannelContext.read(BytesChannelContext.java:47) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleRead(EventHandler.java:119) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:127) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at
org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:23,065][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/N4E8sr5URVigYCJVCE4qug] deleting index [2019-05-28T05:40:23,198][INFO ][o.e.x.c.IndexFollowingIT ] [testDeleteLeaderIndex] after test [2019-05-28T05:40:23,459][INFO ][o.e.x.c.IndexFollowingIT ] [testCloseFollowIndex] before test [2019-05-28T05:40:23,470][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [] [2019-05-28T05:40:23,761][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:40:24,454][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:40:24,640][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:40:24,660][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:24,667][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/2aRshfOkT76gBd3-Wz69eQ] create_mapping [doc] [2019-05-28T05:40:24,839][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/EAXU78K-Timx2zweshQl2Q] create_mapping [doc] [2019-05-28T05:40:25,171][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/EAXU78K-Timx2zweshQl2Q] [2019-05-28T05:40:25,258][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2] [2019-05-28T05:40:25,393][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:25,443][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.coordinateWrites(ShardFollowNodeTask.java:247) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.innerHandleReadResponse(ShardFollowNodeTask.java:359) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$handleReadResponse$6(ShardFollowNodeTask.java:310) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.maybeUpdateMapping(ShardFollowNodeTask.java:413) [main/:?] 
at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$handleReadResponse$7(ShardFollowNodeTask.java:312) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.maybeUpdateSettings(ShardFollowNodeTask.java:428) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.handleReadResponse(ShardFollowNodeTask.java:314) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendShardChangesRequest$3(ShardFollowNodeTask.java:285) [main/:?] at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:62) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListenerResponseHandler.handleResponse(ActionListenerResponseHandler.java:54) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleResponse(TransportService.java:1101) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler$1.doRun(InboundHandler.java:224) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:192) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.handleResponse(InboundHandler.java:216) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:141) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:105) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:660) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.consumeNetworkReads(TcpTransport.java:684) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockTcpReadWriteHandler.consumeReads(MockNioTransport.java:255) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.handleReadBytes(SocketChannelContext.java:215) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.BytesChannelContext.read(BytesChannelContext.java:47) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleRead(EventHandler.java:119) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:127) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:25,458][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] 
at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,464][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] opening indices [[index2/EAXU78K-Timx2zweshQl2Q]] [2019-05-28T05:40:25,473][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] 
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,488][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] 
at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,502][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] 
at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,517][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at 
org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,533][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) 
[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,546][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=3WmRhgV5TQSeN6agbCWJTQ, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at 
org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:25,660][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:40:26,840][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:40:26,913][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/2aRshfOkT76gBd3-Wz69eQ] deleting index [2019-05-28T05:40:27,135][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/EAXU78K-Timx2zweshQl2Q] deleting index [2019-05-28T05:40:27,232][INFO ][o.e.x.c.IndexFollowingIT ] [testCloseFollowIndex] after test [2019-05-28T05:40:27,336][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexMaxOperationSizeInBytes] before test [2019-05-28T05:40:27,345][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:40:27,582][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:40:27,695][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexMaxOperationSizeInBytes] Indexing [186] docs [2019-05-28T05:40:28,065][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:28,590][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). 
[2019-05-28T05:40:28,745][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:40:28,769][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:29,884][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:40:30,005][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/nnmUbaTBTeWSZ6HuwYz5zw] deleting index [2019-05-28T05:40:30,266][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/w7gi1XZ5RBOT1CAxUnZiHQ] deleting index [2019-05-28T05:40:30,414][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexMaxOperationSizeInBytes] after test [2019-05-28T05:40:30,543][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateDynamicLeaderIndexSettings] before test [2019-05-28T05:40:30,551][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [leader] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:40:30,800][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[leader][0]] ...]). [2019-05-28T05:40:31,228][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[follower][0]] ...]). [2019-05-28T05:40:31,376][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [follower][0] Starting to track leader shard [leader][0] [2019-05-28T05:40:31,397][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [follower][0] following leader shard [leader][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:32,631][INFO ][o.e.c.s.IndexScopedSettings] [leader0] updating [index.max_ngram_diff] from [1] to [2] [2019-05-28T05:40:32,673][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:32,736][INFO ][o.e.c.s.IndexScopedSettings] [leader0] updating [index.max_ngram_diff] from [1] to [2] [2019-05-28T05:40:32,756][INFO ][o.e.c.s.IndexScopedSettings] [followerm0] updating [index.max_ngram_diff] from [1] to [2] [2019-05-28T05:40:32,781][INFO ][o.e.c.s.IndexScopedSettings] [followerd3] updating [index.max_ngram_diff] from [1] to [2] [2019-05-28T05:40:33,273][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [follower][0] shard follow task has been stopped [2019-05-28T05:40:33,297][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [leader/z-RH6NFZQhqER0rIEZWjqA] deleting index [2019-05-28T05:40:33,591][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [follower/b10JNXzoRmmEL9DhPY9FhA] deleting index [2019-05-28T05:40:33,704][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateDynamicLeaderIndexSettings] after test [2019-05-28T05:40:33,890][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithConcurrentMappingChanges] before test [2019-05-28T05:40:33,897][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [3]/[0], mappings [doc] [2019-05-28T05:40:34,335][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][2], [index1][0]] ...]). [2019-05-28T05:40:34,460][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithConcurrentMappingChanges] Indexing [20] docs as first batch [2019-05-28T05:40:34,551][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] update_mapping [doc] [2019-05-28T05:40:34,631][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithConcurrentMappingChanges] ensure green follower indices [index2] [2019-05-28T05:40:34,802][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] update_mapping [doc] [2019-05-28T05:40:34,875][WARN ][o.e.t.n.MockNioTransport ] [followerd4] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:40:34,907][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] update_mapping [doc] [2019-05-28T05:40:35,070][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][1], [index2][0]] ...]). 
[2019-05-28T05:40:35,223][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] update_mapping [doc] [2019-05-28T05:40:35,290][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:40:35,341][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:40:35,356][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] update_mapping [doc] [2019-05-28T05:40:35,431][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[3], settings version=[1] [2019-05-28T05:40:35,440][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[3], settings version=[1] [2019-05-28T05:40:35,485][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][2] Starting to track leader shard [index1][2] [2019-05-28T05:40:35,508][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] update_mapping [doc] [2019-05-28T05:40:35,600][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][2] following leader shard [index1][2], follower global checkpoint=[-1], mapping version=[4], settings version=[1] [2019-05-28T05:40:35,702][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] update_mapping [doc] [2019-05-28T05:40:35,833][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithConcurrentMappingChanges] Indexing [32] docs as second batch [2019-05-28T05:40:35,848][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] update_mapping [doc] [2019-05-28T05:40:36,079][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:36,111][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] update_mapping [doc] [2019-05-28T05:40:36,260][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] update_mapping [doc] [2019-05-28T05:40:36,473][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] update_mapping [doc] [2019-05-28T05:40:36,595][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] update_mapping [doc] [2019-05-28T05:40:36,746][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] shard follow task has been stopped [2019-05-28T05:40:36,753][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][2] shard follow task has been stopped [2019-05-28T05:40:36,754][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:40:36,889][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/esxoeCI9Rne3x7n78LspRg] deleting index [2019-05-28T05:40:37,378][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/I-PXZqOrRiCcEyMD_3G2TA] deleting index [2019-05-28T05:40:37,527][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithConcurrentMappingChanges] after test [2019-05-28T05:40:37,702][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithNestedField] before test [2019-05-28T05:40:37,709][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[1], mappings [doc] [2019-05-28T05:40:38,119][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithNestedField] ensure green leader indices [index1] [2019-05-28T05:40:38,247][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:40:38,915][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:40:38,970][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:39,299][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:40:39,411][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:40:39,536][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/bOASF1DQRh2ehEhqVr_Dnw] deleting index [2019-05-28T05:40:39,798][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/CncJZYLDStKP74OOg5oCZA] deleting index [2019-05-28T05:40:39,892][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithNestedField] after test [2019-05-28T05:40:40,063][INFO ][o.e.x.c.IndexFollowingIT ] [testAttemptToChangeCcrFollowingIndexSetting] before test [2019-05-28T05:40:40,069][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:40:40,297][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:40:40,767][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). 
[2019-05-28T05:40:40,941][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:40:41,008][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:40:41,009][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:40:41,016][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/L2YuAgPrQRm1BOexqL50aQ] [2019-05-28T05:40:41,068][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2] [2019-05-28T05:40:41,248][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:40:41,297][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/V2oeU7CRTYO_YAiAWndZzw] deleting index [2019-05-28T05:40:41,540][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/L2YuAgPrQRm1BOexqL50aQ] deleting index [2019-05-28T05:40:41,641][INFO ][o.e.x.c.IndexFollowingIT ] [testAttemptToChangeCcrFollowingIndexSetting] after test [2019-05-28T05:40:41,934][INFO ][o.e.x.c.IndexFollowingIT ] [testCloseLeaderIndex] before test [2019-05-28T05:40:41,940][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [] [2019-05-28T05:40:42,202][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:40:42,670][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:40:42,691][WARN ][o.e.t.n.MockNioTransport ] [followerd4] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:40:42,866][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0]
[2019-05-28T05:40:42,886][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:42,889][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/PL7uPFpKS2uWpH6b6EQILg] create_mapping [doc]
[2019-05-28T05:40:42,923][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:43,007][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/daooU8Z6Tbud__bYXQc_SA] create_mapping [doc]
[2019-05-28T05:40:43,613][INFO ][o.e.c.m.MetaDataIndexStateService] [leader0] closing indices [index1/PL7uPFpKS2uWpH6b6EQILg]
[2019-05-28T05:40:43,737][INFO ][o.e.c.m.MetaDataIndexStateService] [leader0] completed closing of indices [index1]
[2019-05-28T05:40:44,033][INFO ][o.e.c.m.MetaDataIndexStateService] [leader0] opening indices [[index1/PL7uPFpKS2uWpH6b6EQILg]]
[2019-05-28T05:40:44,139][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/daooU8Z6Tbud__bYXQc_SA]
[2019-05-28T05:40:44,203][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2]
[2019-05-28T05:40:44,271][WARN ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] shard follow task encounter non-retryable error
org.elasticsearch.transport.RemoteTransportException: [followerm0][127.0.0.1:45522][indices:admin/settings/update]
Caused by: java.lang.IllegalArgumentException: can not update private setting [index.verified_before_close]; this setting is managed by Elasticsearch
    at org.elasticsearch.common.settings.AbstractScopedSettings.validate(AbstractScopedSettings.java:556) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.settings.AbstractScopedSettings.validate(AbstractScopedSettings.java:476) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.settings.AbstractScopedSettings.validate(AbstractScopedSettings.java:430) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService.updateSettings(MetaDataUpdateSettingsService.java:92) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction.masterOperation(TransportUpdateSettingsAction.java:92) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction.masterOperation(TransportUpdateSettingsAction.java:40) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.master.TransportMasterNodeAction.masterOperation(TransportMasterNodeAction.java:127) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction$1.doRun(TransportMasterNodeAction.java:200) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:192) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.doStart(TransportMasterNodeAction.java:197) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.start(TransportMasterNodeAction.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.master.TransportMasterNodeAction.doExecute(TransportMasterNodeAction.java:138) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.master.TransportMasterNodeAction.doExecute(TransportMasterNodeAction.java:58) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.HandledTransportAction$TransportHandler.messageReceived(HandledTransportAction.java:84) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.action.support.HandledTransportAction$TransportHandler.messageReceived(HandledTransportAction.java:80) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:63) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.InboundHandler$RequestHandler.doRun(InboundHandler.java:267) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:192) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.InboundHandler.handleRequest(InboundHandler.java:188) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:105) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:660) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.TcpTransport.consumeNetworkReads(TcpTransport.java:684) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$MockTcpReadWriteHandler.consumeReads(MockNioTransport.java:255) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.SocketChannelContext.handleReadBytes(SocketChannelContext.java:215) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.BytesChannelContext.read(BytesChannelContext.java:47) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.EventHandler.handleRead(EventHandler.java:119) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:127) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:44,286][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[index1][0]] ...]).
[2019-05-28T05:40:44,378][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[index2][0]] ...]).
[2019-05-28T05:40:44,535][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/PL7uPFpKS2uWpH6b6EQILg] deleting index
[2019-05-28T05:40:44,770][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/daooU8Z6Tbud__bYXQc_SA] deleting index
[2019-05-28T05:40:44,872][INFO ][o.e.x.c.IndexFollowingIT ] [testCloseLeaderIndex] after test
[2019-05-28T05:40:45,189][INFO ][o.e.x.c.IndexFollowingIT ] [testDoNotAllowPutMappingToFollower] before test
[2019-05-28T05:40:45,196][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index-1] creating index, cause [api], templates [], shards [2]/[1], mappings [doc]
[2019-05-28T05:40:45,946][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index-1][0]] ...]).
[2019-05-28T05:40:46,409][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index-2][0] Starting to track leader shard [index-1][0]
[2019-05-28T05:40:46,525][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index-2][1]] ...]).
[2019-05-28T05:40:46,596][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index-2][0] following leader shard [index-1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:46,634][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index-2][1] Starting to track leader shard [index-1][1]
[2019-05-28T05:40:46,653][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index-2][1] following leader shard [index-1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:46,693][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index-2][0] shard follow task has been stopped
[2019-05-28T05:40:46,752][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index-2][1] shard follow task has been stopped
[2019-05-28T05:40:46,759][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index-2/C7rTNTqvQdutEWOPEoW6Yg]
[2019-05-28T05:40:46,846][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index-2]
[2019-05-28T05:40:46,849][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:47,054][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[index-2][0]] ...]).
[2019-05-28T05:40:47,377][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index-2][0]] ...]).
[2019-05-28T05:40:47,431][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] opening indices [[index-2/C7rTNTqvQdutEWOPEoW6Yg]]
[2019-05-28T05:40:47,683][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[index-2][1]] ...]).
[2019-05-28T05:40:47,877][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index-2/C7rTNTqvQdutEWOPEoW6Yg] update_mapping [doc]
[2019-05-28T05:40:47,962][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index-2][0], [index-2][1]] ...]).
[2019-05-28T05:40:48,075][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index-1/eZkV3913Q2eDsijj6yJ6kQ] deleting index
[2019-05-28T05:40:48,533][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index-2/C7rTNTqvQdutEWOPEoW6Yg] deleting index
[2019-05-28T05:40:48,674][INFO ][o.e.x.c.IndexFollowingIT ] [testDoNotAllowPutMappingToFollower] after test
[2019-05-28T05:40:48,995][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateAnalysisLeaderIndexSettings] before test
[2019-05-28T05:40:49,004][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [leader] creating index, cause [api], templates [], shards [1]/[0], mappings [doc]
[2019-05-28T05:40:49,315][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[leader][0]] ...]).
[2019-05-28T05:40:49,790][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[follower][0]] ...]).
[2019-05-28T05:40:49,958][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [follower][0] Starting to track leader shard [leader][0]
[2019-05-28T05:40:49,978][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [follower][0] following leader shard [leader][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:50,352][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleWrite(TestEventHandler.java:154) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleWrite(NioSelector.java:389) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.writeToChannel(NioSelector.java:345) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleQueuedWrites(NioSelector.java:448) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:262) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:50,721][INFO ][o.e.c.m.MetaDataIndexStateService] [leader0] closing indices [leader/3gLq_nIPTCe2Z4UARkn5yw]
[2019-05-28T05:40:50,839][INFO ][o.e.c.m.MetaDataIndexStateService] [leader0] completed closing of indices [leader]
[2019-05-28T05:40:51,135][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [200 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:51,246][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[leader][0]] ...]).
[2019-05-28T05:40:51,369][INFO ][o.e.c.m.MetaDataIndexStateService] [leader0] opening indices [[leader/3gLq_nIPTCe2Z4UARkn5yw]]
[2019-05-28T05:40:51,720][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[leader][0]] ...]).
[2019-05-28T05:40:51,732][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [follower/mfLCNRZYTp67agXzRvs-TQ]
[2019-05-28T05:40:51,792][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [follower]
[2019-05-28T05:40:51,837][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateAnalysisLeaderIndexSettings] ensure green leader indices [leader]
[2019-05-28T05:40:51,843][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [leader/3gLq_nIPTCe2Z4UARkn5yw] update_mapping [doc]
[2019-05-28T05:40:51,978][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[follower][0]] ...]).
[2019-05-28T05:40:52,021][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] opening indices [[follower/mfLCNRZYTp67agXzRvs-TQ]]
[2019-05-28T05:40:52,213][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[follower][0]] ...]).
[2019-05-28T05:40:52,327][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [follower/mfLCNRZYTp67agXzRvs-TQ] update_mapping [doc]
[2019-05-28T05:40:53,213][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [follower][0] shard follow task has been stopped
[2019-05-28T05:40:53,243][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [leader/3gLq_nIPTCe2Z4UARkn5yw] deleting index
[2019-05-28T05:40:53,493][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [follower/mfLCNRZYTp67agXzRvs-TQ] deleting index
[2019-05-28T05:40:53,590][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateAnalysisLeaderIndexSettings] after test
[2019-05-28T05:40:53,816][INFO ][o.e.x.c.IndexFollowingIT ] [testUnfollowIndex] before test
[2019-05-28T05:40:53,822][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [doc]
[2019-05-28T05:40:54,056][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]).
[2019-05-28T05:40:54,264][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:54,463][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]).
[2019-05-28T05:40:54,608][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0]
[2019-05-28T05:40:54,630][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:55,728][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped
[2019-05-28T05:40:55,734][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/el8Jh9aGTRKutFGlVI2IIw]
[2019-05-28T05:40:55,785][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2]
[2019-05-28T05:40:55,953][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[index2][0]] ...]).
[2019-05-28T05:40:55,995][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] opening indices [[index2/el8Jh9aGTRKutFGlVI2IIw]]
[2019-05-28T05:40:56,186][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [GREEN] (reason: [shards started [[index2][0]] ...]).
[2019-05-28T05:40:56,234][INFO ][o.e.x.c.IndexFollowingIT ] [testUnfollowIndex] ensure green follower indices [index2]
[2019-05-28T05:40:56,356][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/PVgyjq4aTKeK_R6rECnU-w] deleting index
[2019-05-28T05:40:56,555][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/el8Jh9aGTRKutFGlVI2IIw] deleting index
[2019-05-28T05:40:56,646][INFO ][o.e.x.c.IndexFollowingIT ] [testUnfollowIndex] after test
[2019-05-28T05:40:56,861][INFO ][o.e.x.c.IndexFollowingIT ] [testMustCloseIndexAndPauseToRestartWithPutFollowing] before test
[2019-05-28T05:40:56,869][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [3]/[1], mappings [doc]
[2019-05-28T05:40:57,746][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][2], [index1][0]] ...]).
[2019-05-28T05:40:58,084][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][1] Starting to track leader shard [index1][1]
[2019-05-28T05:40:58,188][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0], [index2][1]] ...]).
[2019-05-28T05:40:58,247][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:58,283][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][2] Starting to track leader shard [index1][2]
[2019-05-28T05:40:58,328][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0]
[2019-05-28T05:40:58,341][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][2] following leader shard [index1][2], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:58,347][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:40:58,379][WARN ][o.e.s.RestoreService ] [followerm0] [_latest_/_latest_] failed to restore snapshot
org.elasticsearch.snapshots.SnapshotRestoreException: [_ccr_leader_cluster:_latest_/_latest_] cannot restore index [index2] because an open index with same name already exists in the cluster. Either close or delete the existing index or restore the index under a different name by providing a rename pattern and replacement name
    at org.elasticsearch.snapshots.RestoreService$1.validateExistingIndex(RestoreService.java:457) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.snapshots.RestoreService$1.execute(RestoreService.java:307) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:687) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:310) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:210) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:142) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:58,384][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/qdoxYxpXSkyTvuyOYjp-6w]
[2019-05-28T05:40:58,462][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2]
[2019-05-28T05:40:58,657][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[index2][0]] ...]).
[2019-05-28T05:40:58,826][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][2], [index2][0]] ...]).
[2019-05-28T05:40:59,345][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [203 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:40:59,557][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][1] shard follow task has been stopped
[2019-05-28T05:40:59,557][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped
[2019-05-28T05:40:59,577][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][2] shard follow task has been stopped
[2019-05-28T05:40:59,684][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][1], [index2][2]] ...]).
[2019-05-28T05:40:59,735][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/xJKMYqAOQwuczi-QmrGy6A] deleting index
[2019-05-28T05:41:00,270][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/qdoxYxpXSkyTvuyOYjp-6w] deleting index
[2019-05-28T05:41:00,429][INFO ][o.e.x.c.IndexFollowingIT ] [testMustCloseIndexAndPauseToRestartWithPutFollowing] after test
[2019-05-28T05:41:00,825][INFO ][o.e.x.c.IndexFollowingIT ] [testUnfollowNonExistingIndex] before test
[2019-05-28T05:41:00,886][INFO ][o.e.x.c.IndexFollowingIT ] [testUnfollowNonExistingIndex] after test
[2019-05-28T05:41:01,208][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] before test
[2019-05-28T05:41:01,215][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [3]/[0], mappings [doc]
[2019-05-28T05:41:01,649][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][2], [index1][0]] ...]).
[2019-05-28T05:41:01,812][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] waiting for at least [46] documents to be indexed into index [index1]
[2019-05-28T05:41:02,342][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][1], [index2][0]] ...]).
[2019-05-28T05:41:02,482][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][2] Starting to track leader shard [index1][2]
[2019-05-28T05:41:02,524][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][0] Starting to track leader shard [index1][0]
[2019-05-28T05:41:02,546][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][2] following leader shard [index1][2], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:41:02,546][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:41:02,577][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][1] Starting to track leader shard [index1][1]
[2019-05-28T05:41:02,583][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] waiting for at least [139] documents to be indexed into index [index1]
[2019-05-28T05:41:02,593][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1]
[2019-05-28T05:41:02,722][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> asserting <> between index1 and index2
[2019-05-28T05:41:02,766][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [200 milliseconds]
java.lang.RuntimeException: Slow exception on network thread
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:41:02,800][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> shard [index2][1], node[XK3Qc8oGSAqdSsvD2_yMPw], [P], s[STARTED], a[id=aNbwvd2VQwazY-fRL7v4Yg] docs [doc{id='TOJi_WoB0TYwNnOOpTD9 seqNo=0 primaryTerm=1 version=1 source= {"f":1}}, doc{id='T-Ji_WoB0TYwNnOOpTD9 seqNo=1 primaryTerm=1 version=1 source= {"f":4}}, doc{id='UOJi_WoB0TYwNnOOpTD9 seqNo=2 primaryTerm=1 version=1 source= {"f":5}}, doc{id='UeJi_WoB0TYwNnOOpTD9 seqNo=3 primaryTerm=1 version=1 source= {"f":6}}, doc{id='VeJi_WoB0TYwNnOOpTD9 seqNo=4 primaryTerm=1 version=1 source= {"f":10}}, doc{id='V-Ji_WoB0TYwNnOOpTD9 seqNo=5 primaryTerm=1 version=1 source= {"f":12}}, doc{id='WeJi_WoB0TYwNnOOpTD9 seqNo=6 primaryTerm=1 version=1 source= {"f":14}}, doc{id='WuJi_WoB0TYwNnOOpTD- seqNo=7 primaryTerm=1 version=1 source= {"f":15}}, doc{id='XOJi_WoB0TYwNnOOpTD- seqNo=8 primaryTerm=1 version=1 source= {"f":17}}, doc{id='X-Ji_WoB0TYwNnOOpTD- seqNo=9 primaryTerm=1 version=1 source= {"f":20}}, doc{id='YeJi_WoB0TYwNnOOpTD- seqNo=10 primaryTerm=1 version=1 source= {"f":22}}, doc{id='YuJi_WoB0TYwNnOOpTD- seqNo=11 primaryTerm=1 version=1 source= {"f":23}}, doc{id='aOJi_WoB0TYwNnOOpjAD seqNo=12 primaryTerm=1 version=1 source= {"f":29}}, doc{id='aeJi_WoB0TYwNnOOpjAE seqNo=13 primaryTerm=1 version=1 source= {"f":30}}, doc{id='a-Ji_WoB0TYwNnOOpjAE seqNo=14 primaryTerm=1 version=1 source= {"f":32}}, doc{id='cuJi_WoB0TYwNnOOpjAE seqNo=15 primaryTerm=1 version=1 source= {"f":39}}, doc{id='dOJi_WoB0TYwNnOOpjAE seqNo=16 primaryTerm=1 version=1 source= {"f":41}}, doc{id='d-Ji_WoB0TYwNnOOpjAE seqNo=17 primaryTerm=1 version=1 source= {"f":44}}, doc{id='eOJi_WoB0TYwNnOOpjAE seqNo=18 primaryTerm=1 version=1 source= {"f":45}}, doc{id='eeJi_WoB0TYwNnOOpjAE seqNo=19 primaryTerm=1 version=1 source= {"f":46}}, doc{id='e-Ji_WoB0TYwNnOOpjAE seqNo=20 primaryTerm=1 version=1 source= {"f":48}}, doc{id='guJi_WoB0TYwNnOOpjAE seqNo=21 primaryTerm=1 version=1 source= {"f":55}}, doc{id='g-Ji_WoB0TYwNnOOpjAE seqNo=22 primaryTerm=1 version=1 source= {"f":56}}, doc{id='h-Ji_WoB0TYwNnOOpjAK seqNo=23 primaryTerm=1 version=1 source= {"f":60}}, doc{id='iOJi_WoB0TYwNnOOpjAK seqNo=24 primaryTerm=1 version=1 source= {"f":61}}, doc{id='ieJi_WoB0TYwNnOOpjAK seqNo=25 primaryTerm=1 version=1 source= {"f":62}}, doc{id='iuJi_WoB0TYwNnOOpjAK seqNo=26 primaryTerm=1 version=1 source= {"f":63}}, doc{id='jeJi_WoB0TYwNnOOpjAK seqNo=27 primaryTerm=1 version=1 source= {"f":66}}, doc{id='j-Ji_WoB0TYwNnOOpjAK seqNo=28 primaryTerm=1 version=1 source= {"f":68}}, doc{id='kOJi_WoB0TYwNnOOpjAK seqNo=29 primaryTerm=1 version=1 source= {"f":69}}, doc{id='kuJi_WoB0TYwNnOOpjAK seqNo=30 primaryTerm=1 version=1 source= {"f":71}}, doc{id='lOJi_WoB0TYwNnOOpjAK seqNo=31 primaryTerm=1 version=1 source= {"f":73}}, doc{id='l-Ji_WoB0TYwNnOOqDD4 seqNo=32 primaryTerm=1 version=1 source= {"f":76}}, doc{id='meJi_WoB0TYwNnOOqDD4 seqNo=33 primaryTerm=1 version=1 source= {"f":78}}, doc{id='m-Ji_WoB0TYwNnOOqDD4 seqNo=34 primaryTerm=1 version=1 source= {"f":80}}, doc{id='n-Ji_WoB0TYwNnOOqDD4 seqNo=35 primaryTerm=1 version=1 source= {"f":84}}, doc{id='ouJi_WoB0TYwNnOOqDD4 seqNo=36 primaryTerm=1 version=1 source= {"f":87}}, doc{id='pOJi_WoB0TYwNnOOqDD4 seqNo=37 primaryTerm=1 version=1 source= {"f":89}}, doc{id='puJi_WoB0TYwNnOOqDD4 seqNo=38 primaryTerm=1 version=1 source= {"f":91}}, doc{id='qeJi_WoB0TYwNnOOqDD4 seqNo=39 primaryTerm=1 version=1 source= {"f":94}}, doc{id='quJi_WoB0TYwNnOOqDD4 seqNo=40 primaryTerm=1 version=1 source= {"f":95}}, doc{id='q-Ji_WoB0TYwNnOOqDD4 seqNo=41 primaryTerm=1 
version=1 source= {"f":96}}, doc{id='ruJi_WoB0TYwNnOOqDD4 seqNo=42 primaryTerm=1 version=1 source= {"f":99}}, doc{id='sOJi_WoB0TYwNnOOqDD4 seqNo=43 primaryTerm=1 version=1 source= {"f":101}}, doc{id='t-Ji_WoB0TYwNnOOqDD5 seqNo=44 primaryTerm=1 version=1 source= {"f":108}}, doc{id='v-Ji_WoB0TYwNnOOqDD5 seqNo=45 primaryTerm=1 version=1 source= {"f":116}}, doc{id='wuJi_WoB0TYwNnOOqDD5 seqNo=46 primaryTerm=1 version=1 source= {"f":119}}, doc{id='w-Ji_WoB0TYwNnOOqDD5 seqNo=47 primaryTerm=1 version=1 source= {"f":120}}, doc{id='xuJi_WoB0TYwNnOOqDD5 seqNo=48 primaryTerm=1 version=1 source= {"f":123}}, doc{id='yeJi_WoB0TYwNnOOqDD5 seqNo=49 primaryTerm=1 version=1 source= {"f":126}}, doc{id='yuJi_WoB0TYwNnOOqDD5 seqNo=50 primaryTerm=1 version=1 source= {"f":127}}, doc{id='zeJi_WoB0TYwNnOOqDD5 seqNo=51 primaryTerm=1 version=1 source= {"f":130}}, doc{id='zuJi_WoB0TYwNnOOqDD5 seqNo=52 primaryTerm=1 version=1 source= {"f":131}}, doc{id='z-Ji_WoB0TYwNnOOqDD5 seqNo=53 primaryTerm=1 version=1 source= {"f":132}}, doc{id='0uJi_WoB0TYwNnOOqDD_ seqNo=54 primaryTerm=1 version=1 source= {"f":135}}, doc{id='0-Ji_WoB0TYwNnOOqDD_ seqNo=55 primaryTerm=1 version=1 source= {"f":136}}, doc{id='1eJi_WoB0TYwNnOOqDD_ seqNo=56 primaryTerm=1 version=1 source= {"f":138}}, doc{id='1uJi_WoB0TYwNnOOqDD_ seqNo=57 primaryTerm=1 version=1 source= {"f":139}}, doc{id='1-Ji_WoB0TYwNnOOqDD_ seqNo=58 primaryTerm=1 version=1 source= {"f":140}}, doc{id='2-Ji_WoB0TYwNnOOqDD_ seqNo=59 primaryTerm=1 version=1 source= {"f":144}}, doc{id='4-Ji_WoB0TYwNnOOqTAB seqNo=60 primaryTerm=1 version=1 source= {"f":152}}, doc{id='5-Ji_WoB0TYwNnOOqTAB seqNo=61 primaryTerm=1 version=1 source= {"f":156}}, doc{id='6uJi_WoB0TYwNnOOqTAB seqNo=62 primaryTerm=1 version=1 source= {"f":159}}, doc{id='8eJi_WoB0TYwNnOOqTAD seqNo=63 primaryTerm=1 version=1 source= {"f":166}}, doc{id='-eJi_WoB0TYwNnOOqTAD seqNo=64 primaryTerm=1 version=1 source= {"f":174}}, doc{id='_eJi_WoB0TYwNnOOqTAD seqNo=65 primaryTerm=1 version=1 source= {"f":178}}, doc{id='_uJi_WoB0TYwNnOOqTAD seqNo=66 primaryTerm=1 version=1 source= {"f":179}}, doc{id='AeJi_WoB0TYwNnOOqTEI seqNo=67 primaryTerm=1 version=1 source= {"f":182}}, doc{id='AuJi_WoB0TYwNnOOqTEI seqNo=68 primaryTerm=1 version=1 source= {"f":183}}, doc{id='BOJi_WoB0TYwNnOOqTEI seqNo=69 primaryTerm=1 version=1 source= {"f":185}}, doc{id='COJi_WoB0TYwNnOOqTEI seqNo=70 primaryTerm=1 version=1 source= {"f":189}}, doc{id='CeJi_WoB0TYwNnOOqTEI seqNo=71 primaryTerm=1 version=1 source= {"f":190}}, doc{id='D-Ji_WoB0TYwNnOOqTEK seqNo=72 primaryTerm=1 version=1 source= {"f":196}}, doc{id='FOJi_WoB0TYwNnOOqTEK seqNo=73 primaryTerm=1 version=1 source= {"f":201}}, doc{id='FuJi_WoB0TYwNnOOqTEK seqNo=74 primaryTerm=1 version=1 source= {"f":203}}, doc{id='GOJi_WoB0TYwNnOOqTEK seqNo=75 primaryTerm=1 version=1 source= {"f":205}}, doc{id='G-Ji_WoB0TYwNnOOqTEK seqNo=76 primaryTerm=1 version=1 source= {"f":208}}, doc{id='HeJi_WoB0TYwNnOOqTEN seqNo=77 primaryTerm=1 version=1 source= {"f":210}}, doc{id='HuJi_WoB0TYwNnOOqTEN seqNo=78 primaryTerm=1 version=1 source= {"f":211}}, doc{id='IOJi_WoB0TYwNnOOqTEN seqNo=79 primaryTerm=1 version=1 source= {"f":213}}, doc{id='IuJi_WoB0TYwNnOOqTEN seqNo=80 primaryTerm=1 version=1 source= {"f":215}}, doc{id='I-Ji_WoB0TYwNnOOqTEN seqNo=81 primaryTerm=1 version=1 source= {"f":216}}, doc{id='JeJi_WoB0TYwNnOOqTEN seqNo=82 primaryTerm=1 version=1 source= {"f":218}}, doc{id='KuJi_WoB0TYwNnOOqTEN seqNo=83 primaryTerm=1 version=1 source= {"f":223}}, doc{id='LuJi_WoB0TYwNnOOqTEO seqNo=84 primaryTerm=1 version=1 source= {"f":227}}, 
doc{id='MeJi_WoB0TYwNnOOqTEO seqNo=85 primaryTerm=1 version=1 source= {"f":230}}, doc{id='MuJi_WoB0TYwNnOOqTEO seqNo=86 primaryTerm=1 version=1 source= {"f":231}}, doc{id='NOJi_WoB0TYwNnOOqTEO seqNo=87 primaryTerm=1 version=1 source= {"f":233}}, doc{id='NuJi_WoB0TYwNnOOqTEO seqNo=88 primaryTerm=1 version=1 source= {"f":235}}, doc{id='O-Ji_WoB0TYwNnOOqTEO seqNo=89 primaryTerm=1 version=1 source= {"f":240}}, doc{id='P-Ji_WoB0TYwNnOOqTEO seqNo=90 primaryTerm=1 version=1 source= {"f":244}}, doc{id='QOJi_WoB0TYwNnOOqTEO seqNo=91 primaryTerm=1 version=1 source= {"f":245}}, doc{id='QuJi_WoB0TYwNnOOqTEO seqNo=92 primaryTerm=1 version=1 source= {"f":247}}, doc{id='Q-Ji_WoB0TYwNnOOqTEO seqNo=93 primaryTerm=1 version=1 source= {"f":248}}, doc{id='ROJi_WoB0TYwNnOOqTEO seqNo=94 primaryTerm=1 version=1 source= {"f":249}}, doc{id='ReJi_WoB0TYwNnOOqTEO seqNo=95 primaryTerm=1 version=1 source= {"f":250}}, doc{id='SeJi_WoB0TYwNnOOqTEO seqNo=96 primaryTerm=1 version=1 source= {"f":254}}, doc{id='S-Ji_WoB0TYwNnOOqTEQ seqNo=97 primaryTerm=1 version=1 source= {"f":256}}, doc{id='UOJi_WoB0TYwNnOOqTEQ seqNo=98 primaryTerm=1 version=1 source= {"f":261}}, doc{id='U-Ji_WoB0TYwNnOOqTEQ seqNo=99 primaryTerm=1 version=1 source= {"f":264}}, doc{id='VeJi_WoB0TYwNnOOqTEQ seqNo=100 primaryTerm=1 version=1 source= {"f":266}}, doc{id='WOJi_WoB0TYwNnOOqTEQ seqNo=101 primaryTerm=1 version=1 source= {"f":269}}, doc{id='W-Ji_WoB0TYwNnOOqTET seqNo=102 primaryTerm=1 version=1 source= {"f":272}}, doc{id='XOJi_WoB0TYwNnOOqTET seqNo=103 primaryTerm=1 version=1 source= {"f":273}}, doc{id='XeJi_WoB0TYwNnOOqTET seqNo=104 primaryTerm=1 version=1 source= {"f":274}}, doc{id='XuJi_WoB0TYwNnOOqTET seqNo=105 primaryTerm=1 version=1 source= {"f":275}}, doc{id='X-Ji_WoB0TYwNnOOqTET seqNo=106 primaryTerm=1 version=1 source= {"f":276}}, doc{id='YuJi_WoB0TYwNnOOqTET seqNo=107 primaryTerm=1 version=1 source= {"f":279}}, doc{id='Y-Ji_WoB0TYwNnOOqTET seqNo=108 primaryTerm=1 version=1 source= {"f":280}}, doc{id='bOJi_WoB0TYwNnOOqTEU seqNo=109 primaryTerm=1 version=1 source= {"f":289}}, doc{id='beJi_WoB0TYwNnOOqTEU seqNo=110 primaryTerm=1 version=1 source= {"f":290}}, doc{id='buJi_WoB0TYwNnOOqTEU seqNo=111 primaryTerm=1 version=1 source= {"f":291}}, doc{id='b-Ji_WoB0TYwNnOOqTEU seqNo=112 primaryTerm=1 version=1 source= {"f":292}}, doc{id='dOJi_WoB0TYwNnOOqTEU seqNo=113 primaryTerm=1 version=1 source= {"f":297}}, doc{id='d-Ji_WoB0TYwNnOOqTEW seqNo=114 primaryTerm=1 version=1 source= {"f":300}}, doc{id='e-Ji_WoB0TYwNnOOqTEW seqNo=115 primaryTerm=1 version=1 source= {"f":304}}, doc{id='feJi_WoB0TYwNnOOqTEW seqNo=116 primaryTerm=1 version=1 source= {"f":306}}, doc{id='leJi_WoB0TYwNnOOqTEc seqNo=117 primaryTerm=1 version=1 source= {"f":330}}, doc{id='luJi_WoB0TYwNnOOqTEc seqNo=118 primaryTerm=1 version=1 source= {"f":331}}, doc{id='nuJi_WoB0TYwNnOOqTEc seqNo=119 primaryTerm=1 version=1 source= {"f":339}}, doc{id='n-Ji_WoB0TYwNnOOqTEc seqNo=120 primaryTerm=1 version=1 source= {"f":340}}, doc{id='oOJi_WoB0TYwNnOOqTEc seqNo=121 primaryTerm=1 version=1 source= {"f":341}}, doc{id='oeJi_WoB0TYwNnOOqTEc seqNo=122 primaryTerm=1 version=1 source= {"f":342}}, doc{id='ouJi_WoB0TYwNnOOqTEc seqNo=123 primaryTerm=1 version=1 source= {"f":343}}, doc{id='o-Ji_WoB0TYwNnOOqTEc seqNo=124 primaryTerm=1 version=1 source= {"f":344}}, doc{id='huJi_WoB0TYwNnOOqTEc seqNo=125 primaryTerm=1 version=1 source= {"f":315}}, doc{id='h-Ji_WoB0TYwNnOOqTEc seqNo=126 primaryTerm=1 version=1 source= {"f":316}}, doc{id='iOJi_WoB0TYwNnOOqTEc seqNo=127 primaryTerm=1 version=1 source= {"f":317}}, 
doc{id='kOJi_WoB0TYwNnOOqTEc seqNo=128 primaryTerm=1 version=1 source= {"f":325}}, doc{id='k-Ji_WoB0TYwNnOOqTEc seqNo=129 primaryTerm=1 version=1 source= {"f":328}}, doc{id='lOJi_WoB0TYwNnOOqTEc seqNo=130 primaryTerm=1 version=1 source= {"f":329}}, doc{id='wOJi_WoB0TYwNnOOqTEh seqNo=131 primaryTerm=1 version=1 source= {"f":373}}, doc{id='weJi_WoB0TYwNnOOqTEh seqNo=132 primaryTerm=1 version=1 source= {"f":374}}, doc{id='p-Ji_WoB0TYwNnOOqTEf seqNo=133 primaryTerm=1 version=1 source= {"f":348}}, doc{id='rOJi_WoB0TYwNnOOqTEf seqNo=134 primaryTerm=1 version=1 source= {"f":353}}, doc{id='sOJi_WoB0TYwNnOOqTEf seqNo=135 primaryTerm=1 version=1 source= {"f":357}}, doc{id='seJi_WoB0TYwNnOOqTEf seqNo=136 primaryTerm=1 version=1 source= {"f":358}}, doc{id='suJi_WoB0TYwNnOOqTEf seqNo=137 primaryTerm=1 version=1 source= {"f":359}}] seq_no_stats SeqNoStats{maxSeqNo=137, localCheckpoint=137, globalCheckpoint=137} [2019-05-28T05:41:02,883][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> shard [index2][2], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=j5dRO3huTS6nZ49KAP87mA] docs [doc{id='S-Ji_WoB0TYwNnOOpTD4 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='TeJi_WoB0TYwNnOOpTD9 seqNo=1 primaryTerm=1 version=1 source= {"f":2}}, doc{id='WOJi_WoB0TYwNnOOpTD9 seqNo=2 primaryTerm=1 version=1 source= {"f":13}}, doc{id='YOJi_WoB0TYwNnOOpTD- seqNo=3 primaryTerm=1 version=1 source= {"f":21}}, doc{id='Y-Ji_WoB0TYwNnOOpTD- seqNo=4 primaryTerm=1 version=1 source= {"f":24}}, doc{id='ZeJi_WoB0TYwNnOOpTD- seqNo=5 primaryTerm=1 version=1 source= {"f":26}}, doc{id='bOJi_WoB0TYwNnOOpjAE seqNo=6 primaryTerm=1 version=1 source= {"f":33}}, doc{id='b-Ji_WoB0TYwNnOOpjAE seqNo=7 primaryTerm=1 version=1 source= {"f":36}}, doc{id='cOJi_WoB0TYwNnOOpjAE seqNo=8 primaryTerm=1 version=1 source= {"f":37}}, doc{id='ceJi_WoB0TYwNnOOpjAE seqNo=9 primaryTerm=1 version=1 source= {"f":38}}, doc{id='c-Ji_WoB0TYwNnOOpjAE seqNo=10 primaryTerm=1 version=1 source= {"f":40}}, doc{id='deJi_WoB0TYwNnOOpjAE seqNo=11 primaryTerm=1 version=1 source= {"f":42}}, doc{id='duJi_WoB0TYwNnOOpjAE seqNo=12 primaryTerm=1 version=1 source= {"f":43}}, doc{id='euJi_WoB0TYwNnOOpjAE seqNo=13 primaryTerm=1 version=1 source= {"f":47}}, doc{id='fOJi_WoB0TYwNnOOpjAE seqNo=14 primaryTerm=1 version=1 source= {"f":49}}, doc{id='f-Ji_WoB0TYwNnOOpjAE seqNo=15 primaryTerm=1 version=1 source= {"f":52}}, doc{id='gOJi_WoB0TYwNnOOpjAE seqNo=16 primaryTerm=1 version=1 source= {"f":53}}, doc{id='geJi_WoB0TYwNnOOpjAE seqNo=17 primaryTerm=1 version=1 source= {"f":54}}, doc{id='hOJi_WoB0TYwNnOOpjAE seqNo=18 primaryTerm=1 version=1 source= {"f":57}}, doc{id='juJi_WoB0TYwNnOOpjAK seqNo=19 primaryTerm=1 version=1 source= {"f":67}}, doc{id='luJi_WoB0TYwNnOOqDD4 seqNo=20 primaryTerm=1 version=1 source= {"f":75}}, doc{id='muJi_WoB0TYwNnOOqDD4 seqNo=21 primaryTerm=1 version=1 source= {"f":79}}, doc{id='neJi_WoB0TYwNnOOqDD4 seqNo=22 primaryTerm=1 version=1 source= {"f":82}}, doc{id='oOJi_WoB0TYwNnOOqDD4 seqNo=23 primaryTerm=1 version=1 source= {"f":85}}, doc{id='o-Ji_WoB0TYwNnOOqDD4 seqNo=24 primaryTerm=1 version=1 source= {"f":88}}, doc{id='rOJi_WoB0TYwNnOOqDD4 seqNo=25 primaryTerm=1 version=1 source= {"f":97}}, doc{id='reJi_WoB0TYwNnOOqDD4 seqNo=26 primaryTerm=1 version=1 source= {"f":98}}, doc{id='tOJi_WoB0TYwNnOOqDD5 seqNo=27 primaryTerm=1 version=1 source= {"f":105}}, doc{id='vOJi_WoB0TYwNnOOqDD5 seqNo=28 primaryTerm=1 version=1 source= {"f":113}}, doc{id='veJi_WoB0TYwNnOOqDD5 seqNo=29 primaryTerm=1 version=1 source= {"f":114}}, doc{id='vuJi_WoB0TYwNnOOqDD5 
seqNo=30 primaryTerm=1 version=1 source= {"f":115}}, doc{id='weJi_WoB0TYwNnOOqDD5 seqNo=31 primaryTerm=1 version=1 source= {"f":118}}, doc{id='xOJi_WoB0TYwNnOOqDD5 seqNo=32 primaryTerm=1 version=1 source= {"f":121}}, doc{id='yOJi_WoB0TYwNnOOqDD5 seqNo=33 primaryTerm=1 version=1 source= {"f":125}}, doc{id='y-Ji_WoB0TYwNnOOqDD5 seqNo=34 primaryTerm=1 version=1 source= {"f":128}}, doc{id='zOJi_WoB0TYwNnOOqDD5 seqNo=35 primaryTerm=1 version=1 source= {"f":129}}, doc{id='0OJi_WoB0TYwNnOOqDD5 seqNo=36 primaryTerm=1 version=1 source= {"f":133}}, doc{id='2uJi_WoB0TYwNnOOqDD_ seqNo=37 primaryTerm=1 version=1 source= {"f":143}}, doc{id='3uJi_WoB0TYwNnOOqDD_ seqNo=38 primaryTerm=1 version=1 source= {"f":147}}, doc{id='4uJi_WoB0TYwNnOOqTAB seqNo=39 primaryTerm=1 version=1 source= {"f":151}}, doc{id='6OJi_WoB0TYwNnOOqTAB seqNo=40 primaryTerm=1 version=1 source= {"f":157}}, doc{id='7eJi_WoB0TYwNnOOqTAB seqNo=41 primaryTerm=1 version=1 source= {"f":162}}, doc{id='7-Ji_WoB0TYwNnOOqTAB seqNo=42 primaryTerm=1 version=1 source= {"f":164}}, doc{id='8OJi_WoB0TYwNnOOqTAD seqNo=43 primaryTerm=1 version=1 source= {"f":165}}, doc{id='8-Ji_WoB0TYwNnOOqTAD seqNo=44 primaryTerm=1 version=1 source= {"f":168}}, doc{id='9OJi_WoB0TYwNnOOqTAD seqNo=45 primaryTerm=1 version=1 source= {"f":169}}, doc{id='9eJi_WoB0TYwNnOOqTAD seqNo=46 primaryTerm=1 version=1 source= {"f":170}}, doc{id='9uJi_WoB0TYwNnOOqTAD seqNo=47 primaryTerm=1 version=1 source= {"f":171}}, doc{id='9-Ji_WoB0TYwNnOOqTAD seqNo=48 primaryTerm=1 version=1 source= {"f":172}}, doc{id='-OJi_WoB0TYwNnOOqTAD seqNo=49 primaryTerm=1 version=1 source= {"f":173}}, doc{id='_-Ji_WoB0TYwNnOOqTAI seqNo=50 primaryTerm=1 version=1 source= {"f":180}}, doc{id='AOJi_WoB0TYwNnOOqTEI seqNo=51 primaryTerm=1 version=1 source= {"f":181}}, doc{id='BuJi_WoB0TYwNnOOqTEI seqNo=52 primaryTerm=1 version=1 source= {"f":187}}, doc{id='B-Ji_WoB0TYwNnOOqTEI seqNo=53 primaryTerm=1 version=1 source= {"f":188}}, doc{id='CuJi_WoB0TYwNnOOqTEI seqNo=54 primaryTerm=1 version=1 source= {"f":191}}, doc{id='DOJi_WoB0TYwNnOOqTEI seqNo=55 primaryTerm=1 version=1 source= {"f":193}}, doc{id='EOJi_WoB0TYwNnOOqTEK seqNo=56 primaryTerm=1 version=1 source= {"f":197}}, doc{id='EuJi_WoB0TYwNnOOqTEK seqNo=57 primaryTerm=1 version=1 source= {"f":199}}, doc{id='E-Ji_WoB0TYwNnOOqTEK seqNo=58 primaryTerm=1 version=1 source= {"f":200}}, doc{id='FeJi_WoB0TYwNnOOqTEK seqNo=59 primaryTerm=1 version=1 source= {"f":202}}, doc{id='H-Ji_WoB0TYwNnOOqTEN seqNo=60 primaryTerm=1 version=1 source= {"f":212}}, doc{id='IeJi_WoB0TYwNnOOqTEN seqNo=61 primaryTerm=1 version=1 source= {"f":214}}, doc{id='JuJi_WoB0TYwNnOOqTEN seqNo=62 primaryTerm=1 version=1 source= {"f":219}}, doc{id='K-Ji_WoB0TYwNnOOqTEN seqNo=63 primaryTerm=1 version=1 source= {"f":224}}, doc{id='LOJi_WoB0TYwNnOOqTEO seqNo=64 primaryTerm=1 version=1 source= {"f":225}}, doc{id='L-Ji_WoB0TYwNnOOqTEO seqNo=65 primaryTerm=1 version=1 source= {"f":228}}, doc{id='NeJi_WoB0TYwNnOOqTEO seqNo=66 primaryTerm=1 version=1 source= {"f":234}}, doc{id='N-Ji_WoB0TYwNnOOqTEO seqNo=67 primaryTerm=1 version=1 source= {"f":236}}, doc{id='OeJi_WoB0TYwNnOOqTEO seqNo=68 primaryTerm=1 version=1 source= {"f":238}}, doc{id='OuJi_WoB0TYwNnOOqTEO seqNo=69 primaryTerm=1 version=1 source= {"f":239}}, doc{id='PeJi_WoB0TYwNnOOqTEO seqNo=70 primaryTerm=1 version=1 source= {"f":242}}, doc{id='SOJi_WoB0TYwNnOOqTEO seqNo=71 primaryTerm=1 version=1 source= {"f":253}}, doc{id='SuJi_WoB0TYwNnOOqTEQ seqNo=72 primaryTerm=1 version=1 source= {"f":255}}, doc{id='TOJi_WoB0TYwNnOOqTEQ seqNo=73 primaryTerm=1 
version=1 source= {"f":257}}, doc{id='TeJi_WoB0TYwNnOOqTEQ seqNo=74 primaryTerm=1 version=1 source= {"f":258}}, doc{id='TuJi_WoB0TYwNnOOqTEQ seqNo=75 primaryTerm=1 version=1 source= {"f":259}}, doc{id='T-Ji_WoB0TYwNnOOqTEQ seqNo=76 primaryTerm=1 version=1 source= {"f":260}}, doc{id='VOJi_WoB0TYwNnOOqTEQ seqNo=77 primaryTerm=1 version=1 source= {"f":265}}, doc{id='WeJi_WoB0TYwNnOOqTET seqNo=78 primaryTerm=1 version=1 source= {"f":270}}, doc{id='YOJi_WoB0TYwNnOOqTET seqNo=79 primaryTerm=1 version=1 source= {"f":277}}, doc{id='YeJi_WoB0TYwNnOOqTET seqNo=80 primaryTerm=1 version=1 source= {"f":278}}, doc{id='ZuJi_WoB0TYwNnOOqTET seqNo=81 primaryTerm=1 version=1 source= {"f":283}}, doc{id='auJi_WoB0TYwNnOOqTEU seqNo=82 primaryTerm=1 version=1 source= {"f":287}}, doc{id='cOJi_WoB0TYwNnOOqTEU seqNo=83 primaryTerm=1 version=1 source= {"f":293}}, doc{id='c-Ji_WoB0TYwNnOOqTEU seqNo=84 primaryTerm=1 version=1 source= {"f":296}}, doc{id='deJi_WoB0TYwNnOOqTEU seqNo=85 primaryTerm=1 version=1 source= {"f":298}}, doc{id='eOJi_WoB0TYwNnOOqTEW seqNo=86 primaryTerm=1 version=1 source= {"f":301}}, doc{id='eeJi_WoB0TYwNnOOqTEW seqNo=87 primaryTerm=1 version=1 source= {"f":302}}, doc{id='euJi_WoB0TYwNnOOqTEW seqNo=88 primaryTerm=1 version=1 source= {"f":303}}, doc{id='fOJi_WoB0TYwNnOOqTEW seqNo=89 primaryTerm=1 version=1 source= {"f":305}}, doc{id='f-Ji_WoB0TYwNnOOqTEW seqNo=90 primaryTerm=1 version=1 source= {"f":308}}, doc{id='gOJi_WoB0TYwNnOOqTEW seqNo=91 primaryTerm=1 version=1 source= {"f":309}}, doc{id='g-Ji_WoB0TYwNnOOqTEW seqNo=92 primaryTerm=1 version=1 source= {"f":312}}, doc{id='heJi_WoB0TYwNnOOqTEW seqNo=93 primaryTerm=1 version=1 source= {"f":314}}, doc{id='ieJi_WoB0TYwNnOOqTEc seqNo=94 primaryTerm=1 version=1 source= {"f":318}}, doc{id='kuJi_WoB0TYwNnOOqTEc seqNo=95 primaryTerm=1 version=1 source= {"f":327}}, doc{id='l-Ji_WoB0TYwNnOOqTEc seqNo=96 primaryTerm=1 version=1 source= {"f":332}}, doc{id='muJi_WoB0TYwNnOOqTEc seqNo=97 primaryTerm=1 version=1 source= {"f":335}}, doc{id='neJi_WoB0TYwNnOOqTEc seqNo=98 primaryTerm=1 version=1 source= {"f":338}}, doc{id='puJi_WoB0TYwNnOOqTEf seqNo=99 primaryTerm=1 version=1 source= {"f":347}}, doc{id='quJi_WoB0TYwNnOOqTEf seqNo=100 primaryTerm=1 version=1 source= {"f":351}}, doc{id='q-Ji_WoB0TYwNnOOqTEf seqNo=101 primaryTerm=1 version=1 source= {"f":352}}, doc{id='reJi_WoB0TYwNnOOqTEf seqNo=102 primaryTerm=1 version=1 source= {"f":354}}, doc{id='r-Ji_WoB0TYwNnOOqTEf seqNo=103 primaryTerm=1 version=1 source= {"f":356}}, doc{id='s-Ji_WoB0TYwNnOOqTEh seqNo=104 primaryTerm=1 version=1 source= {"f":360}}, doc{id='tOJi_WoB0TYwNnOOqTEh seqNo=105 primaryTerm=1 version=1 source= {"f":361}}, doc{id='teJi_WoB0TYwNnOOqTEh seqNo=106 primaryTerm=1 version=1 source= {"f":362}}, doc{id='uOJi_WoB0TYwNnOOqTEh seqNo=107 primaryTerm=1 version=1 source= {"f":365}}, doc{id='u-Ji_WoB0TYwNnOOqTEh seqNo=108 primaryTerm=1 version=1 source= {"f":368}}, doc{id='veJi_WoB0TYwNnOOqTEh seqNo=109 primaryTerm=1 version=1 source= {"f":370}}, doc{id='vuJi_WoB0TYwNnOOqTEh seqNo=110 primaryTerm=1 version=1 source= {"f":371}}] seq_no_stats SeqNoStats{maxSeqNo=110, localCheckpoint=110, globalCheckpoint=110} [2019-05-28T05:41:02,954][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> shard [index2][0], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=Prk7Gg4ITcW2IvU0FfAf1A] docs [doc{id='TuJi_WoB0TYwNnOOpTD9 seqNo=0 primaryTerm=1 version=1 source= {"f":3}}, doc{id='UuJi_WoB0TYwNnOOpTD9 seqNo=1 primaryTerm=1 version=1 source= {"f":7}}, doc{id='U-Ji_WoB0TYwNnOOpTD9 seqNo=2 
primaryTerm=1 version=1 source= {"f":8}}, doc{id='VOJi_WoB0TYwNnOOpTD9 seqNo=3 primaryTerm=1 version=1 source= {"f":9}}, doc{id='VuJi_WoB0TYwNnOOpTD9 seqNo=4 primaryTerm=1 version=1 source= {"f":11}}, doc{id='W-Ji_WoB0TYwNnOOpTD- seqNo=5 primaryTerm=1 version=1 source= {"f":16}}, doc{id='auJi_WoB0TYwNnOOpjAE seqNo=6 primaryTerm=1 version=1 source= {"f":31}}, doc{id='XeJi_WoB0TYwNnOOpTD- seqNo=7 primaryTerm=1 version=1 source= {"f":18}}, doc{id='XuJi_WoB0TYwNnOOpTD- seqNo=8 primaryTerm=1 version=1 source= {"f":19}}, doc{id='ZOJi_WoB0TYwNnOOpTD- seqNo=9 primaryTerm=1 version=1 source= {"f":25}}, doc{id='ZuJi_WoB0TYwNnOOpjAD seqNo=10 primaryTerm=1 version=1 source= {"f":27}}, doc{id='Z-Ji_WoB0TYwNnOOpjAD seqNo=11 primaryTerm=1 version=1 source= {"f":28}}, doc{id='beJi_WoB0TYwNnOOpjAE seqNo=12 primaryTerm=1 version=1 source= {"f":34}}, doc{id='buJi_WoB0TYwNnOOpjAE seqNo=13 primaryTerm=1 version=1 source= {"f":35}}, doc{id='feJi_WoB0TYwNnOOpjAE seqNo=14 primaryTerm=1 version=1 source= {"f":50}}, doc{id='fuJi_WoB0TYwNnOOpjAE seqNo=15 primaryTerm=1 version=1 source= {"f":51}}, doc{id='heJi_WoB0TYwNnOOpjAE seqNo=16 primaryTerm=1 version=1 source= {"f":58}}, doc{id='huJi_WoB0TYwNnOOpjAE seqNo=17 primaryTerm=1 version=1 source= {"f":59}}, doc{id='i-Ji_WoB0TYwNnOOpjAK seqNo=18 primaryTerm=1 version=1 source= {"f":64}}, doc{id='jOJi_WoB0TYwNnOOpjAK seqNo=19 primaryTerm=1 version=1 source= {"f":65}}, doc{id='keJi_WoB0TYwNnOOpjAK seqNo=20 primaryTerm=1 version=1 source= {"f":70}}, doc{id='k-Ji_WoB0TYwNnOOpjAK seqNo=21 primaryTerm=1 version=1 source= {"f":72}}, doc{id='leJi_WoB0TYwNnOOpjAK seqNo=22 primaryTerm=1 version=1 source= {"f":74}}, doc{id='mOJi_WoB0TYwNnOOqDD4 seqNo=23 primaryTerm=1 version=1 source= {"f":77}}, doc{id='nOJi_WoB0TYwNnOOqDD4 seqNo=24 primaryTerm=1 version=1 source= {"f":81}}, doc{id='nuJi_WoB0TYwNnOOqDD4 seqNo=25 primaryTerm=1 version=1 source= {"f":83}}, doc{id='oeJi_WoB0TYwNnOOqDD4 seqNo=26 primaryTerm=1 version=1 source= {"f":86}}, doc{id='peJi_WoB0TYwNnOOqDD4 seqNo=27 primaryTerm=1 version=1 source= {"f":90}}, doc{id='p-Ji_WoB0TYwNnOOqDD4 seqNo=28 primaryTerm=1 version=1 source= {"f":92}}, doc{id='qOJi_WoB0TYwNnOOqDD4 seqNo=29 primaryTerm=1 version=1 source= {"f":93}}, doc{id='r-Ji_WoB0TYwNnOOqDD4 seqNo=30 primaryTerm=1 version=1 source= {"f":100}}, doc{id='seJi_WoB0TYwNnOOqDD4 seqNo=31 primaryTerm=1 version=1 source= {"f":102}}, doc{id='suJi_WoB0TYwNnOOqDD4 seqNo=32 primaryTerm=1 version=1 source= {"f":103}}, doc{id='s-Ji_WoB0TYwNnOOqDD4 seqNo=33 primaryTerm=1 version=1 source= {"f":104}}, doc{id='teJi_WoB0TYwNnOOqDD5 seqNo=34 primaryTerm=1 version=1 source= {"f":106}}, doc{id='tuJi_WoB0TYwNnOOqDD5 seqNo=35 primaryTerm=1 version=1 source= {"f":107}}, doc{id='uOJi_WoB0TYwNnOOqDD5 seqNo=36 primaryTerm=1 version=1 source= {"f":109}}, doc{id='ueJi_WoB0TYwNnOOqDD5 seqNo=37 primaryTerm=1 version=1 source= {"f":110}}, doc{id='uuJi_WoB0TYwNnOOqDD5 seqNo=38 primaryTerm=1 version=1 source= {"f":111}}, doc{id='u-Ji_WoB0TYwNnOOqDD5 seqNo=39 primaryTerm=1 version=1 source= {"f":112}}, doc{id='wOJi_WoB0TYwNnOOqDD5 seqNo=40 primaryTerm=1 version=1 source= {"f":117}}, doc{id='xeJi_WoB0TYwNnOOqDD5 seqNo=41 primaryTerm=1 version=1 source= {"f":122}}, doc{id='x-Ji_WoB0TYwNnOOqDD5 seqNo=42 primaryTerm=1 version=1 source= {"f":124}}, doc{id='0eJi_WoB0TYwNnOOqDD5 seqNo=43 primaryTerm=1 version=1 source= {"f":134}}, doc{id='1OJi_WoB0TYwNnOOqDD_ seqNo=44 primaryTerm=1 version=1 source= {"f":137}}, doc{id='2OJi_WoB0TYwNnOOqDD_ seqNo=45 primaryTerm=1 version=1 source= {"f":141}}, 
doc{id='2eJi_WoB0TYwNnOOqDD_ seqNo=46 primaryTerm=1 version=1 source= {"f":142}}, doc{id='3OJi_WoB0TYwNnOOqDD_ seqNo=47 primaryTerm=1 version=1 source= {"f":145}}, doc{id='3eJi_WoB0TYwNnOOqDD_ seqNo=48 primaryTerm=1 version=1 source= {"f":146}}, doc{id='3-Ji_WoB0TYwNnOOqDD_ seqNo=49 primaryTerm=1 version=1 source= {"f":148}}, doc{id='4OJi_WoB0TYwNnOOqDD_ seqNo=50 primaryTerm=1 version=1 source= {"f":149}}, doc{id='4eJi_WoB0TYwNnOOqTAB seqNo=51 primaryTerm=1 version=1 source= {"f":150}}, doc{id='5OJi_WoB0TYwNnOOqTAB seqNo=52 primaryTerm=1 version=1 source= {"f":153}}, doc{id='5eJi_WoB0TYwNnOOqTAB seqNo=53 primaryTerm=1 version=1 source= {"f":154}}, doc{id='5uJi_WoB0TYwNnOOqTAB seqNo=54 primaryTerm=1 version=1 source= {"f":155}}, doc{id='6eJi_WoB0TYwNnOOqTAB seqNo=55 primaryTerm=1 version=1 source= {"f":158}}, doc{id='6-Ji_WoB0TYwNnOOqTAB seqNo=56 primaryTerm=1 version=1 source= {"f":160}}, doc{id='7OJi_WoB0TYwNnOOqTAB seqNo=57 primaryTerm=1 version=1 source= {"f":161}}, doc{id='7uJi_WoB0TYwNnOOqTAB seqNo=58 primaryTerm=1 version=1 source= {"f":163}}, doc{id='8uJi_WoB0TYwNnOOqTAD seqNo=59 primaryTerm=1 version=1 source= {"f":167}}, doc{id='-uJi_WoB0TYwNnOOqTAD seqNo=60 primaryTerm=1 version=1 source= {"f":175}}, doc{id='--Ji_WoB0TYwNnOOqTAD seqNo=61 primaryTerm=1 version=1 source= {"f":176}}, doc{id='_OJi_WoB0TYwNnOOqTAD seqNo=62 primaryTerm=1 version=1 source= {"f":177}}, doc{id='A-Ji_WoB0TYwNnOOqTEI seqNo=63 primaryTerm=1 version=1 source= {"f":184}}, doc{id='BeJi_WoB0TYwNnOOqTEI seqNo=64 primaryTerm=1 version=1 source= {"f":186}}, doc{id='C-Ji_WoB0TYwNnOOqTEI seqNo=65 primaryTerm=1 version=1 source= {"f":192}}, doc{id='DeJi_WoB0TYwNnOOqTEI seqNo=66 primaryTerm=1 version=1 source= {"f":194}}, doc{id='DuJi_WoB0TYwNnOOqTEK seqNo=67 primaryTerm=1 version=1 source= {"f":195}}, doc{id='EeJi_WoB0TYwNnOOqTEK seqNo=68 primaryTerm=1 version=1 source= {"f":198}}, doc{id='F-Ji_WoB0TYwNnOOqTEK seqNo=69 primaryTerm=1 version=1 source= {"f":204}}, doc{id='GeJi_WoB0TYwNnOOqTEK seqNo=70 primaryTerm=1 version=1 source= {"f":206}}, doc{id='GuJi_WoB0TYwNnOOqTEK seqNo=71 primaryTerm=1 version=1 source= {"f":207}}, doc{id='HOJi_WoB0TYwNnOOqTEK seqNo=72 primaryTerm=1 version=1 source= {"f":209}}, doc{id='JOJi_WoB0TYwNnOOqTEN seqNo=73 primaryTerm=1 version=1 source= {"f":217}}, doc{id='J-Ji_WoB0TYwNnOOqTEN seqNo=74 primaryTerm=1 version=1 source= {"f":220}}, doc{id='KOJi_WoB0TYwNnOOqTEN seqNo=75 primaryTerm=1 version=1 source= {"f":221}}, doc{id='KeJi_WoB0TYwNnOOqTEN seqNo=76 primaryTerm=1 version=1 source= {"f":222}}, doc{id='LeJi_WoB0TYwNnOOqTEO seqNo=77 primaryTerm=1 version=1 source= {"f":226}}, doc{id='MOJi_WoB0TYwNnOOqTEO seqNo=78 primaryTerm=1 version=1 source= {"f":229}}, doc{id='M-Ji_WoB0TYwNnOOqTEO seqNo=79 primaryTerm=1 version=1 source= {"f":232}}, doc{id='OOJi_WoB0TYwNnOOqTEO seqNo=80 primaryTerm=1 version=1 source= {"f":237}}, doc{id='POJi_WoB0TYwNnOOqTEO seqNo=81 primaryTerm=1 version=1 source= {"f":241}}, doc{id='PuJi_WoB0TYwNnOOqTEO seqNo=82 primaryTerm=1 version=1 source= {"f":243}}, doc{id='QeJi_WoB0TYwNnOOqTEO seqNo=83 primaryTerm=1 version=1 source= {"f":246}}, doc{id='RuJi_WoB0TYwNnOOqTEO seqNo=84 primaryTerm=1 version=1 source= {"f":251}}, doc{id='R-Ji_WoB0TYwNnOOqTEO seqNo=85 primaryTerm=1 version=1 source= {"f":252}}, doc{id='UeJi_WoB0TYwNnOOqTEQ seqNo=86 primaryTerm=1 version=1 source= {"f":262}}, doc{id='UuJi_WoB0TYwNnOOqTEQ seqNo=87 primaryTerm=1 version=1 source= {"f":263}}, doc{id='VuJi_WoB0TYwNnOOqTEQ seqNo=88 primaryTerm=1 version=1 source= {"f":267}}, doc{id='V-Ji_WoB0TYwNnOOqTEQ 
seqNo=89 primaryTerm=1 version=1 source= {"f":268}}, doc{id='WuJi_WoB0TYwNnOOqTET seqNo=90 primaryTerm=1 version=1 source= {"f":271}}, doc{id='ZOJi_WoB0TYwNnOOqTET seqNo=91 primaryTerm=1 version=1 source= {"f":281}}, doc{id='ZeJi_WoB0TYwNnOOqTET seqNo=92 primaryTerm=1 version=1 source= {"f":282}}, doc{id='Z-Ji_WoB0TYwNnOOqTET seqNo=93 primaryTerm=1 version=1 source= {"f":284}}, doc{id='aOJi_WoB0TYwNnOOqTEU seqNo=94 primaryTerm=1 version=1 source= {"f":285}}, doc{id='aeJi_WoB0TYwNnOOqTEU seqNo=95 primaryTerm=1 version=1 source= {"f":286}}, doc{id='a-Ji_WoB0TYwNnOOqTEU seqNo=96 primaryTerm=1 version=1 source= {"f":288}}, doc{id='ceJi_WoB0TYwNnOOqTEU seqNo=97 primaryTerm=1 version=1 source= {"f":294}}, doc{id='cuJi_WoB0TYwNnOOqTEU seqNo=98 primaryTerm=1 version=1 source= {"f":295}}, doc{id='duJi_WoB0TYwNnOOqTEU seqNo=99 primaryTerm=1 version=1 source= {"f":299}}, doc{id='fuJi_WoB0TYwNnOOqTEW seqNo=100 primaryTerm=1 version=1 source= {"f":307}}, doc{id='geJi_WoB0TYwNnOOqTEW seqNo=101 primaryTerm=1 version=1 source= {"f":310}}, doc{id='guJi_WoB0TYwNnOOqTEW seqNo=102 primaryTerm=1 version=1 source= {"f":311}}, doc{id='hOJi_WoB0TYwNnOOqTEW seqNo=103 primaryTerm=1 version=1 source= {"f":313}}, doc{id='iuJi_WoB0TYwNnOOqTEc seqNo=104 primaryTerm=1 version=1 source= {"f":319}}, doc{id='i-Ji_WoB0TYwNnOOqTEc seqNo=105 primaryTerm=1 version=1 source= {"f":320}}, doc{id='jOJi_WoB0TYwNnOOqTEc seqNo=106 primaryTerm=1 version=1 source= {"f":321}}, doc{id='jeJi_WoB0TYwNnOOqTEc seqNo=107 primaryTerm=1 version=1 source= {"f":322}}, doc{id='juJi_WoB0TYwNnOOqTEc seqNo=108 primaryTerm=1 version=1 source= {"f":323}}, doc{id='j-Ji_WoB0TYwNnOOqTEc seqNo=109 primaryTerm=1 version=1 source= {"f":324}}, doc{id='keJi_WoB0TYwNnOOqTEc seqNo=110 primaryTerm=1 version=1 source= {"f":326}}, doc{id='mOJi_WoB0TYwNnOOqTEc seqNo=111 primaryTerm=1 version=1 source= {"f":333}}, doc{id='meJi_WoB0TYwNnOOqTEc seqNo=112 primaryTerm=1 version=1 source= {"f":334}}, doc{id='m-Ji_WoB0TYwNnOOqTEc seqNo=113 primaryTerm=1 version=1 source= {"f":336}}, doc{id='nOJi_WoB0TYwNnOOqTEc seqNo=114 primaryTerm=1 version=1 source= {"f":337}}, doc{id='pOJi_WoB0TYwNnOOqTEf seqNo=115 primaryTerm=1 version=1 source= {"f":345}}, doc{id='peJi_WoB0TYwNnOOqTEf seqNo=116 primaryTerm=1 version=1 source= {"f":346}}, doc{id='qOJi_WoB0TYwNnOOqTEf seqNo=117 primaryTerm=1 version=1 source= {"f":349}}, doc{id='qeJi_WoB0TYwNnOOqTEf seqNo=118 primaryTerm=1 version=1 source= {"f":350}}, doc{id='ruJi_WoB0TYwNnOOqTEf seqNo=119 primaryTerm=1 version=1 source= {"f":355}}, doc{id='tuJi_WoB0TYwNnOOqTEh seqNo=120 primaryTerm=1 version=1 source= {"f":363}}, doc{id='t-Ji_WoB0TYwNnOOqTEh seqNo=121 primaryTerm=1 version=1 source= {"f":364}}, doc{id='ueJi_WoB0TYwNnOOqTEh seqNo=122 primaryTerm=1 version=1 source= {"f":366}}, doc{id='uuJi_WoB0TYwNnOOqTEh seqNo=123 primaryTerm=1 version=1 source= {"f":367}}, doc{id='vOJi_WoB0TYwNnOOqTEh seqNo=124 primaryTerm=1 version=1 source= {"f":369}}, doc{id='v-Ji_WoB0TYwNnOOqTEh seqNo=125 primaryTerm=1 version=1 source= {"f":372}}, doc{id='wuJi_WoB0TYwNnOOqTF9 seqNo=126 primaryTerm=1 version=1 source= {"f":375}}, doc{id='w-Ji_WoB0TYwNnOOqTF9 seqNo=127 primaryTerm=1 version=1 source= {"f":376}}] seq_no_stats SeqNoStats{maxSeqNo=127, localCheckpoint=127, globalCheckpoint=127} [2019-05-28T05:41:02,957][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> shard [index1][2], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=hsHChdt5S3acJdELzCiCPg] docs [doc{id='S-Ji_WoB0TYwNnOOpTD4 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, 
doc{id='TeJi_WoB0TYwNnOOpTD9 seqNo=1 primaryTerm=1 version=1 source= {"f":2}}, doc{id='WOJi_WoB0TYwNnOOpTD9 seqNo=2 primaryTerm=1 version=1 source= {"f":13}}, doc{id='YOJi_WoB0TYwNnOOpTD- seqNo=3 primaryTerm=1 version=1 source= {"f":21}}, doc{id='Y-Ji_WoB0TYwNnOOpTD- seqNo=4 primaryTerm=1 version=1 source= {"f":24}}, doc{id='ZeJi_WoB0TYwNnOOpTD- seqNo=5 primaryTerm=1 version=1 source= {"f":26}}, doc{id='bOJi_WoB0TYwNnOOpjAE seqNo=6 primaryTerm=1 version=1 source= {"f":33}}, doc{id='b-Ji_WoB0TYwNnOOpjAE seqNo=7 primaryTerm=1 version=1 source= {"f":36}}, doc{id='cOJi_WoB0TYwNnOOpjAE seqNo=8 primaryTerm=1 version=1 source= {"f":37}}, doc{id='ceJi_WoB0TYwNnOOpjAE seqNo=9 primaryTerm=1 version=1 source= {"f":38}}, doc{id='c-Ji_WoB0TYwNnOOpjAE seqNo=10 primaryTerm=1 version=1 source= {"f":40}}, doc{id='deJi_WoB0TYwNnOOpjAE seqNo=11 primaryTerm=1 version=1 source= {"f":42}}, doc{id='duJi_WoB0TYwNnOOpjAE seqNo=12 primaryTerm=1 version=1 source= {"f":43}}, doc{id='euJi_WoB0TYwNnOOpjAE seqNo=13 primaryTerm=1 version=1 source= {"f":47}}, doc{id='fOJi_WoB0TYwNnOOpjAE seqNo=14 primaryTerm=1 version=1 source= {"f":49}}, doc{id='f-Ji_WoB0TYwNnOOpjAE seqNo=15 primaryTerm=1 version=1 source= {"f":52}}, doc{id='gOJi_WoB0TYwNnOOpjAE seqNo=16 primaryTerm=1 version=1 source= {"f":53}}, doc{id='geJi_WoB0TYwNnOOpjAE seqNo=17 primaryTerm=1 version=1 source= {"f":54}}, doc{id='hOJi_WoB0TYwNnOOpjAE seqNo=18 primaryTerm=1 version=1 source= {"f":57}}, doc{id='juJi_WoB0TYwNnOOpjAK seqNo=19 primaryTerm=1 version=1 source= {"f":67}}, doc{id='luJi_WoB0TYwNnOOqDD4 seqNo=20 primaryTerm=1 version=1 source= {"f":75}}, doc{id='muJi_WoB0TYwNnOOqDD4 seqNo=21 primaryTerm=1 version=1 source= {"f":79}}, doc{id='neJi_WoB0TYwNnOOqDD4 seqNo=22 primaryTerm=1 version=1 source= {"f":82}}, doc{id='oOJi_WoB0TYwNnOOqDD4 seqNo=23 primaryTerm=1 version=1 source= {"f":85}}, doc{id='o-Ji_WoB0TYwNnOOqDD4 seqNo=24 primaryTerm=1 version=1 source= {"f":88}}, doc{id='rOJi_WoB0TYwNnOOqDD4 seqNo=25 primaryTerm=1 version=1 source= {"f":97}}, doc{id='reJi_WoB0TYwNnOOqDD4 seqNo=26 primaryTerm=1 version=1 source= {"f":98}}, doc{id='tOJi_WoB0TYwNnOOqDD5 seqNo=27 primaryTerm=1 version=1 source= {"f":105}}, doc{id='vOJi_WoB0TYwNnOOqDD5 seqNo=28 primaryTerm=1 version=1 source= {"f":113}}, doc{id='veJi_WoB0TYwNnOOqDD5 seqNo=29 primaryTerm=1 version=1 source= {"f":114}}, doc{id='vuJi_WoB0TYwNnOOqDD5 seqNo=30 primaryTerm=1 version=1 source= {"f":115}}, doc{id='weJi_WoB0TYwNnOOqDD5 seqNo=31 primaryTerm=1 version=1 source= {"f":118}}, doc{id='xOJi_WoB0TYwNnOOqDD5 seqNo=32 primaryTerm=1 version=1 source= {"f":121}}, doc{id='yOJi_WoB0TYwNnOOqDD5 seqNo=33 primaryTerm=1 version=1 source= {"f":125}}, doc{id='y-Ji_WoB0TYwNnOOqDD5 seqNo=34 primaryTerm=1 version=1 source= {"f":128}}, doc{id='zOJi_WoB0TYwNnOOqDD5 seqNo=35 primaryTerm=1 version=1 source= {"f":129}}, doc{id='0OJi_WoB0TYwNnOOqDD5 seqNo=36 primaryTerm=1 version=1 source= {"f":133}}, doc{id='2uJi_WoB0TYwNnOOqDD_ seqNo=37 primaryTerm=1 version=1 source= {"f":143}}, doc{id='3uJi_WoB0TYwNnOOqDD_ seqNo=38 primaryTerm=1 version=1 source= {"f":147}}, doc{id='4uJi_WoB0TYwNnOOqTAB seqNo=39 primaryTerm=1 version=1 source= {"f":151}}, doc{id='6OJi_WoB0TYwNnOOqTAB seqNo=40 primaryTerm=1 version=1 source= {"f":157}}, doc{id='7eJi_WoB0TYwNnOOqTAB seqNo=41 primaryTerm=1 version=1 source= {"f":162}}, doc{id='7-Ji_WoB0TYwNnOOqTAB seqNo=42 primaryTerm=1 version=1 source= {"f":164}}, doc{id='8OJi_WoB0TYwNnOOqTAD seqNo=43 primaryTerm=1 version=1 source= {"f":165}}, doc{id='8-Ji_WoB0TYwNnOOqTAD seqNo=44 primaryTerm=1 version=1 
source= {"f":168}}, doc{id='9OJi_WoB0TYwNnOOqTAD seqNo=45 primaryTerm=1 version=1 source= {"f":169}}, doc{id='9eJi_WoB0TYwNnOOqTAD seqNo=46 primaryTerm=1 version=1 source= {"f":170}}, doc{id='9uJi_WoB0TYwNnOOqTAD seqNo=47 primaryTerm=1 version=1 source= {"f":171}}, doc{id='9-Ji_WoB0TYwNnOOqTAD seqNo=48 primaryTerm=1 version=1 source= {"f":172}}, doc{id='-OJi_WoB0TYwNnOOqTAD seqNo=49 primaryTerm=1 version=1 source= {"f":173}}, doc{id='_-Ji_WoB0TYwNnOOqTAI seqNo=50 primaryTerm=1 version=1 source= {"f":180}}, doc{id='AOJi_WoB0TYwNnOOqTEI seqNo=51 primaryTerm=1 version=1 source= {"f":181}}, doc{id='BuJi_WoB0TYwNnOOqTEI seqNo=52 primaryTerm=1 version=1 source= {"f":187}}, doc{id='B-Ji_WoB0TYwNnOOqTEI seqNo=53 primaryTerm=1 version=1 source= {"f":188}}, doc{id='CuJi_WoB0TYwNnOOqTEI seqNo=54 primaryTerm=1 version=1 source= {"f":191}}, doc{id='DOJi_WoB0TYwNnOOqTEI seqNo=55 primaryTerm=1 version=1 source= {"f":193}}, doc{id='EOJi_WoB0TYwNnOOqTEK seqNo=56 primaryTerm=1 version=1 source= {"f":197}}, doc{id='EuJi_WoB0TYwNnOOqTEK seqNo=57 primaryTerm=1 version=1 source= {"f":199}}, doc{id='E-Ji_WoB0TYwNnOOqTEK seqNo=58 primaryTerm=1 version=1 source= {"f":200}}, doc{id='FeJi_WoB0TYwNnOOqTEK seqNo=59 primaryTerm=1 version=1 source= {"f":202}}, doc{id='H-Ji_WoB0TYwNnOOqTEN seqNo=60 primaryTerm=1 version=1 source= {"f":212}}, doc{id='IeJi_WoB0TYwNnOOqTEN seqNo=61 primaryTerm=1 version=1 source= {"f":214}}, doc{id='JuJi_WoB0TYwNnOOqTEN seqNo=62 primaryTerm=1 version=1 source= {"f":219}}, doc{id='K-Ji_WoB0TYwNnOOqTEN seqNo=63 primaryTerm=1 version=1 source= {"f":224}}, doc{id='LOJi_WoB0TYwNnOOqTEO seqNo=64 primaryTerm=1 version=1 source= {"f":225}}, doc{id='L-Ji_WoB0TYwNnOOqTEO seqNo=65 primaryTerm=1 version=1 source= {"f":228}}, doc{id='NeJi_WoB0TYwNnOOqTEO seqNo=66 primaryTerm=1 version=1 source= {"f":234}}, doc{id='N-Ji_WoB0TYwNnOOqTEO seqNo=67 primaryTerm=1 version=1 source= {"f":236}}, doc{id='OeJi_WoB0TYwNnOOqTEO seqNo=68 primaryTerm=1 version=1 source= {"f":238}}, doc{id='OuJi_WoB0TYwNnOOqTEO seqNo=69 primaryTerm=1 version=1 source= {"f":239}}, doc{id='PeJi_WoB0TYwNnOOqTEO seqNo=70 primaryTerm=1 version=1 source= {"f":242}}, doc{id='SOJi_WoB0TYwNnOOqTEO seqNo=71 primaryTerm=1 version=1 source= {"f":253}}, doc{id='SuJi_WoB0TYwNnOOqTEQ seqNo=72 primaryTerm=1 version=1 source= {"f":255}}, doc{id='TOJi_WoB0TYwNnOOqTEQ seqNo=73 primaryTerm=1 version=1 source= {"f":257}}, doc{id='TeJi_WoB0TYwNnOOqTEQ seqNo=74 primaryTerm=1 version=1 source= {"f":258}}, doc{id='TuJi_WoB0TYwNnOOqTEQ seqNo=75 primaryTerm=1 version=1 source= {"f":259}}, doc{id='T-Ji_WoB0TYwNnOOqTEQ seqNo=76 primaryTerm=1 version=1 source= {"f":260}}, doc{id='VOJi_WoB0TYwNnOOqTEQ seqNo=77 primaryTerm=1 version=1 source= {"f":265}}, doc{id='WeJi_WoB0TYwNnOOqTET seqNo=78 primaryTerm=1 version=1 source= {"f":270}}, doc{id='YOJi_WoB0TYwNnOOqTET seqNo=79 primaryTerm=1 version=1 source= {"f":277}}, doc{id='YeJi_WoB0TYwNnOOqTET seqNo=80 primaryTerm=1 version=1 source= {"f":278}}, doc{id='ZuJi_WoB0TYwNnOOqTET seqNo=81 primaryTerm=1 version=1 source= {"f":283}}, doc{id='auJi_WoB0TYwNnOOqTEU seqNo=82 primaryTerm=1 version=1 source= {"f":287}}, doc{id='cOJi_WoB0TYwNnOOqTEU seqNo=83 primaryTerm=1 version=1 source= {"f":293}}, doc{id='c-Ji_WoB0TYwNnOOqTEU seqNo=84 primaryTerm=1 version=1 source= {"f":296}}, doc{id='deJi_WoB0TYwNnOOqTEU seqNo=85 primaryTerm=1 version=1 source= {"f":298}}, doc{id='eOJi_WoB0TYwNnOOqTEW seqNo=86 primaryTerm=1 version=1 source= {"f":301}}, doc{id='eeJi_WoB0TYwNnOOqTEW seqNo=87 primaryTerm=1 version=1 source= {"f":302}}, 
doc{id='euJi_WoB0TYwNnOOqTEW seqNo=88 primaryTerm=1 version=1 source= {"f":303}}, doc{id='fOJi_WoB0TYwNnOOqTEW seqNo=89 primaryTerm=1 version=1 source= {"f":305}}, doc{id='f-Ji_WoB0TYwNnOOqTEW seqNo=90 primaryTerm=1 version=1 source= {"f":308}}, doc{id='gOJi_WoB0TYwNnOOqTEW seqNo=91 primaryTerm=1 version=1 source= {"f":309}}, doc{id='g-Ji_WoB0TYwNnOOqTEW seqNo=92 primaryTerm=1 version=1 source= {"f":312}}, doc{id='heJi_WoB0TYwNnOOqTEW seqNo=93 primaryTerm=1 version=1 source= {"f":314}}, doc{id='ieJi_WoB0TYwNnOOqTEc seqNo=94 primaryTerm=1 version=1 source= {"f":318}}, doc{id='kuJi_WoB0TYwNnOOqTEc seqNo=95 primaryTerm=1 version=1 source= {"f":327}}, doc{id='l-Ji_WoB0TYwNnOOqTEc seqNo=96 primaryTerm=1 version=1 source= {"f":332}}, doc{id='muJi_WoB0TYwNnOOqTEc seqNo=97 primaryTerm=1 version=1 source= {"f":335}}, doc{id='neJi_WoB0TYwNnOOqTEc seqNo=98 primaryTerm=1 version=1 source= {"f":338}}, doc{id='puJi_WoB0TYwNnOOqTEf seqNo=99 primaryTerm=1 version=1 source= {"f":347}}, doc{id='quJi_WoB0TYwNnOOqTEf seqNo=100 primaryTerm=1 version=1 source= {"f":351}}, doc{id='q-Ji_WoB0TYwNnOOqTEf seqNo=101 primaryTerm=1 version=1 source= {"f":352}}, doc{id='reJi_WoB0TYwNnOOqTEf seqNo=102 primaryTerm=1 version=1 source= {"f":354}}, doc{id='r-Ji_WoB0TYwNnOOqTEf seqNo=103 primaryTerm=1 version=1 source= {"f":356}}, doc{id='s-Ji_WoB0TYwNnOOqTEh seqNo=104 primaryTerm=1 version=1 source= {"f":360}}, doc{id='tOJi_WoB0TYwNnOOqTEh seqNo=105 primaryTerm=1 version=1 source= {"f":361}}, doc{id='teJi_WoB0TYwNnOOqTEh seqNo=106 primaryTerm=1 version=1 source= {"f":362}}, doc{id='uOJi_WoB0TYwNnOOqTEh seqNo=107 primaryTerm=1 version=1 source= {"f":365}}, doc{id='u-Ji_WoB0TYwNnOOqTEh seqNo=108 primaryTerm=1 version=1 source= {"f":368}}, doc{id='veJi_WoB0TYwNnOOqTEh seqNo=109 primaryTerm=1 version=1 source= {"f":370}}, doc{id='vuJi_WoB0TYwNnOOqTEh seqNo=110 primaryTerm=1 version=1 source= {"f":371}}] seq_no_stats SeqNoStats{maxSeqNo=110, localCheckpoint=110, globalCheckpoint=110} [2019-05-28T05:41:02,959][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> shard [index1][1], node[rUlMFBY9QXukTRws0zFidg], [P], s[STARTED], a[id=ZugfbndVTWCHOdY7GRDisw] docs [doc{id='TOJi_WoB0TYwNnOOpTD9 seqNo=0 primaryTerm=1 version=1 source= {"f":1}}, doc{id='T-Ji_WoB0TYwNnOOpTD9 seqNo=1 primaryTerm=1 version=1 source= {"f":4}}, doc{id='UOJi_WoB0TYwNnOOpTD9 seqNo=2 primaryTerm=1 version=1 source= {"f":5}}, doc{id='UeJi_WoB0TYwNnOOpTD9 seqNo=3 primaryTerm=1 version=1 source= {"f":6}}, doc{id='VeJi_WoB0TYwNnOOpTD9 seqNo=4 primaryTerm=1 version=1 source= {"f":10}}, doc{id='V-Ji_WoB0TYwNnOOpTD9 seqNo=5 primaryTerm=1 version=1 source= {"f":12}}, doc{id='WeJi_WoB0TYwNnOOpTD9 seqNo=6 primaryTerm=1 version=1 source= {"f":14}}, doc{id='WuJi_WoB0TYwNnOOpTD- seqNo=7 primaryTerm=1 version=1 source= {"f":15}}, doc{id='XOJi_WoB0TYwNnOOpTD- seqNo=8 primaryTerm=1 version=1 source= {"f":17}}, doc{id='X-Ji_WoB0TYwNnOOpTD- seqNo=9 primaryTerm=1 version=1 source= {"f":20}}, doc{id='YeJi_WoB0TYwNnOOpTD- seqNo=10 primaryTerm=1 version=1 source= {"f":22}}, doc{id='YuJi_WoB0TYwNnOOpTD- seqNo=11 primaryTerm=1 version=1 source= {"f":23}}, doc{id='aOJi_WoB0TYwNnOOpjAD seqNo=12 primaryTerm=1 version=1 source= {"f":29}}, doc{id='aeJi_WoB0TYwNnOOpjAE seqNo=13 primaryTerm=1 version=1 source= {"f":30}}, doc{id='a-Ji_WoB0TYwNnOOpjAE seqNo=14 primaryTerm=1 version=1 source= {"f":32}}, doc{id='cuJi_WoB0TYwNnOOpjAE seqNo=15 primaryTerm=1 version=1 source= {"f":39}}, doc{id='dOJi_WoB0TYwNnOOpjAE seqNo=16 primaryTerm=1 version=1 source= {"f":41}}, 
doc{id='d-Ji_WoB0TYwNnOOpjAE seqNo=17 primaryTerm=1 version=1 source= {"f":44}}, doc{id='eOJi_WoB0TYwNnOOpjAE seqNo=18 primaryTerm=1 version=1 source= {"f":45}}, doc{id='eeJi_WoB0TYwNnOOpjAE seqNo=19 primaryTerm=1 version=1 source= {"f":46}}, doc{id='e-Ji_WoB0TYwNnOOpjAE seqNo=20 primaryTerm=1 version=1 source= {"f":48}}, doc{id='guJi_WoB0TYwNnOOpjAE seqNo=21 primaryTerm=1 version=1 source= {"f":55}}, doc{id='g-Ji_WoB0TYwNnOOpjAE seqNo=22 primaryTerm=1 version=1 source= {"f":56}}, doc{id='h-Ji_WoB0TYwNnOOpjAK seqNo=23 primaryTerm=1 version=1 source= {"f":60}}, doc{id='iOJi_WoB0TYwNnOOpjAK seqNo=24 primaryTerm=1 version=1 source= {"f":61}}, doc{id='ieJi_WoB0TYwNnOOpjAK seqNo=25 primaryTerm=1 version=1 source= {"f":62}}, doc{id='iuJi_WoB0TYwNnOOpjAK seqNo=26 primaryTerm=1 version=1 source= {"f":63}}, doc{id='jeJi_WoB0TYwNnOOpjAK seqNo=27 primaryTerm=1 version=1 source= {"f":66}}, doc{id='j-Ji_WoB0TYwNnOOpjAK seqNo=28 primaryTerm=1 version=1 source= {"f":68}}, doc{id='kOJi_WoB0TYwNnOOpjAK seqNo=29 primaryTerm=1 version=1 source= {"f":69}}, doc{id='kuJi_WoB0TYwNnOOpjAK seqNo=30 primaryTerm=1 version=1 source= {"f":71}}, doc{id='lOJi_WoB0TYwNnOOpjAK seqNo=31 primaryTerm=1 version=1 source= {"f":73}}, doc{id='l-Ji_WoB0TYwNnOOqDD4 seqNo=32 primaryTerm=1 version=1 source= {"f":76}}, doc{id='meJi_WoB0TYwNnOOqDD4 seqNo=33 primaryTerm=1 version=1 source= {"f":78}}, doc{id='m-Ji_WoB0TYwNnOOqDD4 seqNo=34 primaryTerm=1 version=1 source= {"f":80}}, doc{id='n-Ji_WoB0TYwNnOOqDD4 seqNo=35 primaryTerm=1 version=1 source= {"f":84}}, doc{id='ouJi_WoB0TYwNnOOqDD4 seqNo=36 primaryTerm=1 version=1 source= {"f":87}}, doc{id='pOJi_WoB0TYwNnOOqDD4 seqNo=37 primaryTerm=1 version=1 source= {"f":89}}, doc{id='puJi_WoB0TYwNnOOqDD4 seqNo=38 primaryTerm=1 version=1 source= {"f":91}}, doc{id='qeJi_WoB0TYwNnOOqDD4 seqNo=39 primaryTerm=1 version=1 source= {"f":94}}, doc{id='quJi_WoB0TYwNnOOqDD4 seqNo=40 primaryTerm=1 version=1 source= {"f":95}}, doc{id='q-Ji_WoB0TYwNnOOqDD4 seqNo=41 primaryTerm=1 version=1 source= {"f":96}}, doc{id='ruJi_WoB0TYwNnOOqDD4 seqNo=42 primaryTerm=1 version=1 source= {"f":99}}, doc{id='sOJi_WoB0TYwNnOOqDD4 seqNo=43 primaryTerm=1 version=1 source= {"f":101}}, doc{id='t-Ji_WoB0TYwNnOOqDD5 seqNo=44 primaryTerm=1 version=1 source= {"f":108}}, doc{id='v-Ji_WoB0TYwNnOOqDD5 seqNo=45 primaryTerm=1 version=1 source= {"f":116}}, doc{id='wuJi_WoB0TYwNnOOqDD5 seqNo=46 primaryTerm=1 version=1 source= {"f":119}}, doc{id='w-Ji_WoB0TYwNnOOqDD5 seqNo=47 primaryTerm=1 version=1 source= {"f":120}}, doc{id='xuJi_WoB0TYwNnOOqDD5 seqNo=48 primaryTerm=1 version=1 source= {"f":123}}, doc{id='yeJi_WoB0TYwNnOOqDD5 seqNo=49 primaryTerm=1 version=1 source= {"f":126}}, doc{id='yuJi_WoB0TYwNnOOqDD5 seqNo=50 primaryTerm=1 version=1 source= {"f":127}}, doc{id='zeJi_WoB0TYwNnOOqDD5 seqNo=51 primaryTerm=1 version=1 source= {"f":130}}, doc{id='zuJi_WoB0TYwNnOOqDD5 seqNo=52 primaryTerm=1 version=1 source= {"f":131}}, doc{id='z-Ji_WoB0TYwNnOOqDD5 seqNo=53 primaryTerm=1 version=1 source= {"f":132}}, doc{id='0uJi_WoB0TYwNnOOqDD_ seqNo=54 primaryTerm=1 version=1 source= {"f":135}}, doc{id='0-Ji_WoB0TYwNnOOqDD_ seqNo=55 primaryTerm=1 version=1 source= {"f":136}}, doc{id='1eJi_WoB0TYwNnOOqDD_ seqNo=56 primaryTerm=1 version=1 source= {"f":138}}, doc{id='1uJi_WoB0TYwNnOOqDD_ seqNo=57 primaryTerm=1 version=1 source= {"f":139}}, doc{id='1-Ji_WoB0TYwNnOOqDD_ seqNo=58 primaryTerm=1 version=1 source= {"f":140}}, doc{id='2-Ji_WoB0TYwNnOOqDD_ seqNo=59 primaryTerm=1 version=1 source= {"f":144}}, doc{id='4-Ji_WoB0TYwNnOOqTAB seqNo=60 primaryTerm=1 
version=1 source= {"f":152}}, doc{id='5-Ji_WoB0TYwNnOOqTAB seqNo=61 primaryTerm=1 version=1 source= {"f":156}}, doc{id='6uJi_WoB0TYwNnOOqTAB seqNo=62 primaryTerm=1 version=1 source= {"f":159}}, doc{id='8eJi_WoB0TYwNnOOqTAD seqNo=63 primaryTerm=1 version=1 source= {"f":166}}, doc{id='-eJi_WoB0TYwNnOOqTAD seqNo=64 primaryTerm=1 version=1 source= {"f":174}}, doc{id='_eJi_WoB0TYwNnOOqTAD seqNo=65 primaryTerm=1 version=1 source= {"f":178}}, doc{id='_uJi_WoB0TYwNnOOqTAD seqNo=66 primaryTerm=1 version=1 source= {"f":179}}, doc{id='AeJi_WoB0TYwNnOOqTEI seqNo=67 primaryTerm=1 version=1 source= {"f":182}}, doc{id='AuJi_WoB0TYwNnOOqTEI seqNo=68 primaryTerm=1 version=1 source= {"f":183}}, doc{id='BOJi_WoB0TYwNnOOqTEI seqNo=69 primaryTerm=1 version=1 source= {"f":185}}, doc{id='COJi_WoB0TYwNnOOqTEI seqNo=70 primaryTerm=1 version=1 source= {"f":189}}, doc{id='CeJi_WoB0TYwNnOOqTEI seqNo=71 primaryTerm=1 version=1 source= {"f":190}}, doc{id='D-Ji_WoB0TYwNnOOqTEK seqNo=72 primaryTerm=1 version=1 source= {"f":196}}, doc{id='FOJi_WoB0TYwNnOOqTEK seqNo=73 primaryTerm=1 version=1 source= {"f":201}}, doc{id='FuJi_WoB0TYwNnOOqTEK seqNo=74 primaryTerm=1 version=1 source= {"f":203}}, doc{id='GOJi_WoB0TYwNnOOqTEK seqNo=75 primaryTerm=1 version=1 source= {"f":205}}, doc{id='G-Ji_WoB0TYwNnOOqTEK seqNo=76 primaryTerm=1 version=1 source= {"f":208}}, doc{id='HeJi_WoB0TYwNnOOqTEN seqNo=77 primaryTerm=1 version=1 source= {"f":210}}, doc{id='HuJi_WoB0TYwNnOOqTEN seqNo=78 primaryTerm=1 version=1 source= {"f":211}}, doc{id='IOJi_WoB0TYwNnOOqTEN seqNo=79 primaryTerm=1 version=1 source= {"f":213}}, doc{id='IuJi_WoB0TYwNnOOqTEN seqNo=80 primaryTerm=1 version=1 source= {"f":215}}, doc{id='I-Ji_WoB0TYwNnOOqTEN seqNo=81 primaryTerm=1 version=1 source= {"f":216}}, doc{id='JeJi_WoB0TYwNnOOqTEN seqNo=82 primaryTerm=1 version=1 source= {"f":218}}, doc{id='KuJi_WoB0TYwNnOOqTEN seqNo=83 primaryTerm=1 version=1 source= {"f":223}}, doc{id='LuJi_WoB0TYwNnOOqTEO seqNo=84 primaryTerm=1 version=1 source= {"f":227}}, doc{id='MeJi_WoB0TYwNnOOqTEO seqNo=85 primaryTerm=1 version=1 source= {"f":230}}, doc{id='MuJi_WoB0TYwNnOOqTEO seqNo=86 primaryTerm=1 version=1 source= {"f":231}}, doc{id='NOJi_WoB0TYwNnOOqTEO seqNo=87 primaryTerm=1 version=1 source= {"f":233}}, doc{id='NuJi_WoB0TYwNnOOqTEO seqNo=88 primaryTerm=1 version=1 source= {"f":235}}, doc{id='O-Ji_WoB0TYwNnOOqTEO seqNo=89 primaryTerm=1 version=1 source= {"f":240}}, doc{id='P-Ji_WoB0TYwNnOOqTEO seqNo=90 primaryTerm=1 version=1 source= {"f":244}}, doc{id='QOJi_WoB0TYwNnOOqTEO seqNo=91 primaryTerm=1 version=1 source= {"f":245}}, doc{id='QuJi_WoB0TYwNnOOqTEO seqNo=92 primaryTerm=1 version=1 source= {"f":247}}, doc{id='Q-Ji_WoB0TYwNnOOqTEO seqNo=93 primaryTerm=1 version=1 source= {"f":248}}, doc{id='ROJi_WoB0TYwNnOOqTEO seqNo=94 primaryTerm=1 version=1 source= {"f":249}}, doc{id='ReJi_WoB0TYwNnOOqTEO seqNo=95 primaryTerm=1 version=1 source= {"f":250}}, doc{id='SeJi_WoB0TYwNnOOqTEO seqNo=96 primaryTerm=1 version=1 source= {"f":254}}, doc{id='S-Ji_WoB0TYwNnOOqTEQ seqNo=97 primaryTerm=1 version=1 source= {"f":256}}, doc{id='UOJi_WoB0TYwNnOOqTEQ seqNo=98 primaryTerm=1 version=1 source= {"f":261}}, doc{id='U-Ji_WoB0TYwNnOOqTEQ seqNo=99 primaryTerm=1 version=1 source= {"f":264}}, doc{id='VeJi_WoB0TYwNnOOqTEQ seqNo=100 primaryTerm=1 version=1 source= {"f":266}}, doc{id='WOJi_WoB0TYwNnOOqTEQ seqNo=101 primaryTerm=1 version=1 source= {"f":269}}, doc{id='W-Ji_WoB0TYwNnOOqTET seqNo=102 primaryTerm=1 version=1 source= {"f":272}}, doc{id='XOJi_WoB0TYwNnOOqTET seqNo=103 primaryTerm=1 version=1 source= 
{"f":273}}, doc{id='XeJi_WoB0TYwNnOOqTET seqNo=104 primaryTerm=1 version=1 source= {"f":274}}, doc{id='XuJi_WoB0TYwNnOOqTET seqNo=105 primaryTerm=1 version=1 source= {"f":275}}, doc{id='X-Ji_WoB0TYwNnOOqTET seqNo=106 primaryTerm=1 version=1 source= {"f":276}}, doc{id='YuJi_WoB0TYwNnOOqTET seqNo=107 primaryTerm=1 version=1 source= {"f":279}}, doc{id='Y-Ji_WoB0TYwNnOOqTET seqNo=108 primaryTerm=1 version=1 source= {"f":280}}, doc{id='bOJi_WoB0TYwNnOOqTEU seqNo=109 primaryTerm=1 version=1 source= {"f":289}}, doc{id='beJi_WoB0TYwNnOOqTEU seqNo=110 primaryTerm=1 version=1 source= {"f":290}}, doc{id='buJi_WoB0TYwNnOOqTEU seqNo=111 primaryTerm=1 version=1 source= {"f":291}}, doc{id='b-Ji_WoB0TYwNnOOqTEU seqNo=112 primaryTerm=1 version=1 source= {"f":292}}, doc{id='dOJi_WoB0TYwNnOOqTEU seqNo=113 primaryTerm=1 version=1 source= {"f":297}}, doc{id='d-Ji_WoB0TYwNnOOqTEW seqNo=114 primaryTerm=1 version=1 source= {"f":300}}, doc{id='e-Ji_WoB0TYwNnOOqTEW seqNo=115 primaryTerm=1 version=1 source= {"f":304}}, doc{id='feJi_WoB0TYwNnOOqTEW seqNo=116 primaryTerm=1 version=1 source= {"f":306}}, doc{id='leJi_WoB0TYwNnOOqTEc seqNo=117 primaryTerm=1 version=1 source= {"f":330}}, doc{id='luJi_WoB0TYwNnOOqTEc seqNo=118 primaryTerm=1 version=1 source= {"f":331}}, doc{id='nuJi_WoB0TYwNnOOqTEc seqNo=119 primaryTerm=1 version=1 source= {"f":339}}, doc{id='n-Ji_WoB0TYwNnOOqTEc seqNo=120 primaryTerm=1 version=1 source= {"f":340}}, doc{id='oOJi_WoB0TYwNnOOqTEc seqNo=121 primaryTerm=1 version=1 source= {"f":341}}, doc{id='oeJi_WoB0TYwNnOOqTEc seqNo=122 primaryTerm=1 version=1 source= {"f":342}}, doc{id='ouJi_WoB0TYwNnOOqTEc seqNo=123 primaryTerm=1 version=1 source= {"f":343}}, doc{id='o-Ji_WoB0TYwNnOOqTEc seqNo=124 primaryTerm=1 version=1 source= {"f":344}}, doc{id='huJi_WoB0TYwNnOOqTEc seqNo=125 primaryTerm=1 version=1 source= {"f":315}}, doc{id='h-Ji_WoB0TYwNnOOqTEc seqNo=126 primaryTerm=1 version=1 source= {"f":316}}, doc{id='iOJi_WoB0TYwNnOOqTEc seqNo=127 primaryTerm=1 version=1 source= {"f":317}}, doc{id='kOJi_WoB0TYwNnOOqTEc seqNo=128 primaryTerm=1 version=1 source= {"f":325}}, doc{id='k-Ji_WoB0TYwNnOOqTEc seqNo=129 primaryTerm=1 version=1 source= {"f":328}}, doc{id='lOJi_WoB0TYwNnOOqTEc seqNo=130 primaryTerm=1 version=1 source= {"f":329}}, doc{id='wOJi_WoB0TYwNnOOqTEh seqNo=131 primaryTerm=1 version=1 source= {"f":373}}, doc{id='weJi_WoB0TYwNnOOqTEh seqNo=132 primaryTerm=1 version=1 source= {"f":374}}, doc{id='p-Ji_WoB0TYwNnOOqTEf seqNo=133 primaryTerm=1 version=1 source= {"f":348}}, doc{id='rOJi_WoB0TYwNnOOqTEf seqNo=134 primaryTerm=1 version=1 source= {"f":353}}, doc{id='sOJi_WoB0TYwNnOOqTEf seqNo=135 primaryTerm=1 version=1 source= {"f":357}}, doc{id='seJi_WoB0TYwNnOOqTEf seqNo=136 primaryTerm=1 version=1 source= {"f":358}}, doc{id='suJi_WoB0TYwNnOOqTEf seqNo=137 primaryTerm=1 version=1 source= {"f":359}}] seq_no_stats SeqNoStats{maxSeqNo=137, localCheckpoint=137, globalCheckpoint=137} [2019-05-28T05:41:02,962][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> shard [index1][0], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=_Nr5ioI1QFukio1TlEPGEA] docs [doc{id='TuJi_WoB0TYwNnOOpTD9 seqNo=0 primaryTerm=1 version=1 source= {"f":3}}, doc{id='UuJi_WoB0TYwNnOOpTD9 seqNo=1 primaryTerm=1 version=1 source= {"f":7}}, doc{id='U-Ji_WoB0TYwNnOOpTD9 seqNo=2 primaryTerm=1 version=1 source= {"f":8}}, doc{id='VOJi_WoB0TYwNnOOpTD9 seqNo=3 primaryTerm=1 version=1 source= {"f":9}}, doc{id='VuJi_WoB0TYwNnOOpTD9 seqNo=4 primaryTerm=1 version=1 source= {"f":11}}, doc{id='W-Ji_WoB0TYwNnOOpTD- seqNo=5 primaryTerm=1 
version=1 source= {"f":16}}, doc{id='auJi_WoB0TYwNnOOpjAE seqNo=6 primaryTerm=1 version=1 source= {"f":31}}, doc{id='XeJi_WoB0TYwNnOOpTD- seqNo=7 primaryTerm=1 version=1 source= {"f":18}}, doc{id='XuJi_WoB0TYwNnOOpTD- seqNo=8 primaryTerm=1 version=1 source= {"f":19}}, doc{id='ZOJi_WoB0TYwNnOOpTD- seqNo=9 primaryTerm=1 version=1 source= {"f":25}}, doc{id='ZuJi_WoB0TYwNnOOpjAD seqNo=10 primaryTerm=1 version=1 source= {"f":27}}, doc{id='Z-Ji_WoB0TYwNnOOpjAD seqNo=11 primaryTerm=1 version=1 source= {"f":28}}, doc{id='beJi_WoB0TYwNnOOpjAE seqNo=12 primaryTerm=1 version=1 source= {"f":34}}, doc{id='buJi_WoB0TYwNnOOpjAE seqNo=13 primaryTerm=1 version=1 source= {"f":35}}, doc{id='feJi_WoB0TYwNnOOpjAE seqNo=14 primaryTerm=1 version=1 source= {"f":50}}, doc{id='fuJi_WoB0TYwNnOOpjAE seqNo=15 primaryTerm=1 version=1 source= {"f":51}}, doc{id='heJi_WoB0TYwNnOOpjAE seqNo=16 primaryTerm=1 version=1 source= {"f":58}}, doc{id='huJi_WoB0TYwNnOOpjAE seqNo=17 primaryTerm=1 version=1 source= {"f":59}}, doc{id='i-Ji_WoB0TYwNnOOpjAK seqNo=18 primaryTerm=1 version=1 source= {"f":64}}, doc{id='jOJi_WoB0TYwNnOOpjAK seqNo=19 primaryTerm=1 version=1 source= {"f":65}}, doc{id='keJi_WoB0TYwNnOOpjAK seqNo=20 primaryTerm=1 version=1 source= {"f":70}}, doc{id='k-Ji_WoB0TYwNnOOpjAK seqNo=21 primaryTerm=1 version=1 source= {"f":72}}, doc{id='leJi_WoB0TYwNnOOpjAK seqNo=22 primaryTerm=1 version=1 source= {"f":74}}, doc{id='mOJi_WoB0TYwNnOOqDD4 seqNo=23 primaryTerm=1 version=1 source= {"f":77}}, doc{id='nOJi_WoB0TYwNnOOqDD4 seqNo=24 primaryTerm=1 version=1 source= {"f":81}}, doc{id='nuJi_WoB0TYwNnOOqDD4 seqNo=25 primaryTerm=1 version=1 source= {"f":83}}, doc{id='oeJi_WoB0TYwNnOOqDD4 seqNo=26 primaryTerm=1 version=1 source= {"f":86}}, doc{id='peJi_WoB0TYwNnOOqDD4 seqNo=27 primaryTerm=1 version=1 source= {"f":90}}, doc{id='p-Ji_WoB0TYwNnOOqDD4 seqNo=28 primaryTerm=1 version=1 source= {"f":92}}, doc{id='qOJi_WoB0TYwNnOOqDD4 seqNo=29 primaryTerm=1 version=1 source= {"f":93}}, doc{id='r-Ji_WoB0TYwNnOOqDD4 seqNo=30 primaryTerm=1 version=1 source= {"f":100}}, doc{id='seJi_WoB0TYwNnOOqDD4 seqNo=31 primaryTerm=1 version=1 source= {"f":102}}, doc{id='suJi_WoB0TYwNnOOqDD4 seqNo=32 primaryTerm=1 version=1 source= {"f":103}}, doc{id='s-Ji_WoB0TYwNnOOqDD4 seqNo=33 primaryTerm=1 version=1 source= {"f":104}}, doc{id='teJi_WoB0TYwNnOOqDD5 seqNo=34 primaryTerm=1 version=1 source= {"f":106}}, doc{id='tuJi_WoB0TYwNnOOqDD5 seqNo=35 primaryTerm=1 version=1 source= {"f":107}}, doc{id='uOJi_WoB0TYwNnOOqDD5 seqNo=36 primaryTerm=1 version=1 source= {"f":109}}, doc{id='ueJi_WoB0TYwNnOOqDD5 seqNo=37 primaryTerm=1 version=1 source= {"f":110}}, doc{id='uuJi_WoB0TYwNnOOqDD5 seqNo=38 primaryTerm=1 version=1 source= {"f":111}}, doc{id='u-Ji_WoB0TYwNnOOqDD5 seqNo=39 primaryTerm=1 version=1 source= {"f":112}}, doc{id='wOJi_WoB0TYwNnOOqDD5 seqNo=40 primaryTerm=1 version=1 source= {"f":117}}, doc{id='xeJi_WoB0TYwNnOOqDD5 seqNo=41 primaryTerm=1 version=1 source= {"f":122}}, doc{id='x-Ji_WoB0TYwNnOOqDD5 seqNo=42 primaryTerm=1 version=1 source= {"f":124}}, doc{id='0eJi_WoB0TYwNnOOqDD5 seqNo=43 primaryTerm=1 version=1 source= {"f":134}}, doc{id='1OJi_WoB0TYwNnOOqDD_ seqNo=44 primaryTerm=1 version=1 source= {"f":137}}, doc{id='2OJi_WoB0TYwNnOOqDD_ seqNo=45 primaryTerm=1 version=1 source= {"f":141}}, doc{id='2eJi_WoB0TYwNnOOqDD_ seqNo=46 primaryTerm=1 version=1 source= {"f":142}}, doc{id='3OJi_WoB0TYwNnOOqDD_ seqNo=47 primaryTerm=1 version=1 source= {"f":145}}, doc{id='3eJi_WoB0TYwNnOOqDD_ seqNo=48 primaryTerm=1 version=1 source= {"f":146}}, 
doc{id='3-Ji_WoB0TYwNnOOqDD_ seqNo=49 primaryTerm=1 version=1 source= {"f":148}}, doc{id='4OJi_WoB0TYwNnOOqDD_ seqNo=50 primaryTerm=1 version=1 source= {"f":149}}, doc{id='4eJi_WoB0TYwNnOOqTAB seqNo=51 primaryTerm=1 version=1 source= {"f":150}}, doc{id='5OJi_WoB0TYwNnOOqTAB seqNo=52 primaryTerm=1 version=1 source= {"f":153}}, doc{id='5eJi_WoB0TYwNnOOqTAB seqNo=53 primaryTerm=1 version=1 source= {"f":154}}, doc{id='5uJi_WoB0TYwNnOOqTAB seqNo=54 primaryTerm=1 version=1 source= {"f":155}}, doc{id='6eJi_WoB0TYwNnOOqTAB seqNo=55 primaryTerm=1 version=1 source= {"f":158}}, doc{id='6-Ji_WoB0TYwNnOOqTAB seqNo=56 primaryTerm=1 version=1 source= {"f":160}}, doc{id='7OJi_WoB0TYwNnOOqTAB seqNo=57 primaryTerm=1 version=1 source= {"f":161}}, doc{id='7uJi_WoB0TYwNnOOqTAB seqNo=58 primaryTerm=1 version=1 source= {"f":163}}, doc{id='8uJi_WoB0TYwNnOOqTAD seqNo=59 primaryTerm=1 version=1 source= {"f":167}}, doc{id='-uJi_WoB0TYwNnOOqTAD seqNo=60 primaryTerm=1 version=1 source= {"f":175}}, doc{id='--Ji_WoB0TYwNnOOqTAD seqNo=61 primaryTerm=1 version=1 source= {"f":176}}, doc{id='_OJi_WoB0TYwNnOOqTAD seqNo=62 primaryTerm=1 version=1 source= {"f":177}}, doc{id='A-Ji_WoB0TYwNnOOqTEI seqNo=63 primaryTerm=1 version=1 source= {"f":184}}, doc{id='BeJi_WoB0TYwNnOOqTEI seqNo=64 primaryTerm=1 version=1 source= {"f":186}}, doc{id='C-Ji_WoB0TYwNnOOqTEI seqNo=65 primaryTerm=1 version=1 source= {"f":192}}, doc{id='DeJi_WoB0TYwNnOOqTEI seqNo=66 primaryTerm=1 version=1 source= {"f":194}}, doc{id='DuJi_WoB0TYwNnOOqTEK seqNo=67 primaryTerm=1 version=1 source= {"f":195}}, doc{id='EeJi_WoB0TYwNnOOqTEK seqNo=68 primaryTerm=1 version=1 source= {"f":198}}, doc{id='F-Ji_WoB0TYwNnOOqTEK seqNo=69 primaryTerm=1 version=1 source= {"f":204}}, doc{id='GeJi_WoB0TYwNnOOqTEK seqNo=70 primaryTerm=1 version=1 source= {"f":206}}, doc{id='GuJi_WoB0TYwNnOOqTEK seqNo=71 primaryTerm=1 version=1 source= {"f":207}}, doc{id='HOJi_WoB0TYwNnOOqTEK seqNo=72 primaryTerm=1 version=1 source= {"f":209}}, doc{id='JOJi_WoB0TYwNnOOqTEN seqNo=73 primaryTerm=1 version=1 source= {"f":217}}, doc{id='J-Ji_WoB0TYwNnOOqTEN seqNo=74 primaryTerm=1 version=1 source= {"f":220}}, doc{id='KOJi_WoB0TYwNnOOqTEN seqNo=75 primaryTerm=1 version=1 source= {"f":221}}, doc{id='KeJi_WoB0TYwNnOOqTEN seqNo=76 primaryTerm=1 version=1 source= {"f":222}}, doc{id='LeJi_WoB0TYwNnOOqTEO seqNo=77 primaryTerm=1 version=1 source= {"f":226}}, doc{id='MOJi_WoB0TYwNnOOqTEO seqNo=78 primaryTerm=1 version=1 source= {"f":229}}, doc{id='M-Ji_WoB0TYwNnOOqTEO seqNo=79 primaryTerm=1 version=1 source= {"f":232}}, doc{id='OOJi_WoB0TYwNnOOqTEO seqNo=80 primaryTerm=1 version=1 source= {"f":237}}, doc{id='POJi_WoB0TYwNnOOqTEO seqNo=81 primaryTerm=1 version=1 source= {"f":241}}, doc{id='PuJi_WoB0TYwNnOOqTEO seqNo=82 primaryTerm=1 version=1 source= {"f":243}}, doc{id='QeJi_WoB0TYwNnOOqTEO seqNo=83 primaryTerm=1 version=1 source= {"f":246}}, doc{id='RuJi_WoB0TYwNnOOqTEO seqNo=84 primaryTerm=1 version=1 source= {"f":251}}, doc{id='R-Ji_WoB0TYwNnOOqTEO seqNo=85 primaryTerm=1 version=1 source= {"f":252}}, doc{id='UeJi_WoB0TYwNnOOqTEQ seqNo=86 primaryTerm=1 version=1 source= {"f":262}}, doc{id='UuJi_WoB0TYwNnOOqTEQ seqNo=87 primaryTerm=1 version=1 source= {"f":263}}, doc{id='VuJi_WoB0TYwNnOOqTEQ seqNo=88 primaryTerm=1 version=1 source= {"f":267}}, doc{id='V-Ji_WoB0TYwNnOOqTEQ seqNo=89 primaryTerm=1 version=1 source= {"f":268}}, doc{id='WuJi_WoB0TYwNnOOqTET seqNo=90 primaryTerm=1 version=1 source= {"f":271}}, doc{id='ZOJi_WoB0TYwNnOOqTET seqNo=91 primaryTerm=1 version=1 source= {"f":281}}, doc{id='ZeJi_WoB0TYwNnOOqTET 
seqNo=92 primaryTerm=1 version=1 source= {"f":282}}, doc{id='Z-Ji_WoB0TYwNnOOqTET seqNo=93 primaryTerm=1 version=1 source= {"f":284}}, doc{id='aOJi_WoB0TYwNnOOqTEU seqNo=94 primaryTerm=1 version=1 source= {"f":285}}, doc{id='aeJi_WoB0TYwNnOOqTEU seqNo=95 primaryTerm=1 version=1 source= {"f":286}}, doc{id='a-Ji_WoB0TYwNnOOqTEU seqNo=96 primaryTerm=1 version=1 source= {"f":288}}, doc{id='ceJi_WoB0TYwNnOOqTEU seqNo=97 primaryTerm=1 version=1 source= {"f":294}}, doc{id='cuJi_WoB0TYwNnOOqTEU seqNo=98 primaryTerm=1 version=1 source= {"f":295}}, doc{id='duJi_WoB0TYwNnOOqTEU seqNo=99 primaryTerm=1 version=1 source= {"f":299}}, doc{id='fuJi_WoB0TYwNnOOqTEW seqNo=100 primaryTerm=1 version=1 source= {"f":307}}, doc{id='geJi_WoB0TYwNnOOqTEW seqNo=101 primaryTerm=1 version=1 source= {"f":310}}, doc{id='guJi_WoB0TYwNnOOqTEW seqNo=102 primaryTerm=1 version=1 source= {"f":311}}, doc{id='hOJi_WoB0TYwNnOOqTEW seqNo=103 primaryTerm=1 version=1 source= {"f":313}}, doc{id='iuJi_WoB0TYwNnOOqTEc seqNo=104 primaryTerm=1 version=1 source= {"f":319}}, doc{id='i-Ji_WoB0TYwNnOOqTEc seqNo=105 primaryTerm=1 version=1 source= {"f":320}}, doc{id='jOJi_WoB0TYwNnOOqTEc seqNo=106 primaryTerm=1 version=1 source= {"f":321}}, doc{id='jeJi_WoB0TYwNnOOqTEc seqNo=107 primaryTerm=1 version=1 source= {"f":322}}, doc{id='juJi_WoB0TYwNnOOqTEc seqNo=108 primaryTerm=1 version=1 source= {"f":323}}, doc{id='j-Ji_WoB0TYwNnOOqTEc seqNo=109 primaryTerm=1 version=1 source= {"f":324}}, doc{id='keJi_WoB0TYwNnOOqTEc seqNo=110 primaryTerm=1 version=1 source= {"f":326}}, doc{id='mOJi_WoB0TYwNnOOqTEc seqNo=111 primaryTerm=1 version=1 source= {"f":333}}, doc{id='meJi_WoB0TYwNnOOqTEc seqNo=112 primaryTerm=1 version=1 source= {"f":334}}, doc{id='m-Ji_WoB0TYwNnOOqTEc seqNo=113 primaryTerm=1 version=1 source= {"f":336}}, doc{id='nOJi_WoB0TYwNnOOqTEc seqNo=114 primaryTerm=1 version=1 source= {"f":337}}, doc{id='pOJi_WoB0TYwNnOOqTEf seqNo=115 primaryTerm=1 version=1 source= {"f":345}}, doc{id='peJi_WoB0TYwNnOOqTEf seqNo=116 primaryTerm=1 version=1 source= {"f":346}}, doc{id='qOJi_WoB0TYwNnOOqTEf seqNo=117 primaryTerm=1 version=1 source= {"f":349}}, doc{id='qeJi_WoB0TYwNnOOqTEf seqNo=118 primaryTerm=1 version=1 source= {"f":350}}, doc{id='ruJi_WoB0TYwNnOOqTEf seqNo=119 primaryTerm=1 version=1 source= {"f":355}}, doc{id='tuJi_WoB0TYwNnOOqTEh seqNo=120 primaryTerm=1 version=1 source= {"f":363}}, doc{id='t-Ji_WoB0TYwNnOOqTEh seqNo=121 primaryTerm=1 version=1 source= {"f":364}}, doc{id='ueJi_WoB0TYwNnOOqTEh seqNo=122 primaryTerm=1 version=1 source= {"f":366}}, doc{id='uuJi_WoB0TYwNnOOqTEh seqNo=123 primaryTerm=1 version=1 source= {"f":367}}, doc{id='vOJi_WoB0TYwNnOOqTEh seqNo=124 primaryTerm=1 version=1 source= {"f":369}}, doc{id='v-Ji_WoB0TYwNnOOqTEh seqNo=125 primaryTerm=1 version=1 source= {"f":372}}, doc{id='wuJi_WoB0TYwNnOOqTF9 seqNo=126 primaryTerm=1 version=1 source= {"f":375}}, doc{id='w-Ji_WoB0TYwNnOOqTF9 seqNo=127 primaryTerm=1 version=1 source= {"f":376}}] seq_no_stats SeqNoStats{maxSeqNo=127, localCheckpoint=127, globalCheckpoint=127} [2019-05-28T05:41:02,964][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] --> asserting seq_no_stats between index1 and index2 [2019-05-28T05:41:03,015][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][0] shard follow task has been stopped [2019-05-28T05:41:03,048][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][2] shard follow task has been stopped [2019-05-28T05:41:03,094][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][1] shard follow task has been stopped 
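The shard dumps above end with the test's parity check ("asserting seq_no_stats between index1 and index2"): for every shard copy the logged SeqNoStats (maxSeqNo, localCheckpoint, globalCheckpoint) are expected to line up between the leader index and its follower. The test does this in-process against the internal test cluster, so the sketch below is only a rough external approximation: it pulls shard-level index stats over the REST API, whose per-shard output should expose the same seq_no figures. The host/ports, client setup, and class name are assumptions for illustration; only the index names index1 and index2 come from the log.

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Minimal sketch, not the test's own assertion: print shard-level stats for the leader and
// follower indices so their seq_no sections (max_seq_no, local_checkpoint, global_checkpoint)
// can be compared by eye. Addresses are placeholders; the test runs on random local ports.
public class CompareSeqNoStats {
    public static void main(String[] args) throws Exception {
        try (RestClient leader = RestClient.builder(new HttpHost("127.0.0.1", 9200, "http")).build();
             RestClient follower = RestClient.builder(new HttpHost("127.0.0.1", 9201, "http")).build()) {
            printShardStats(leader, "index1");   // leader index from the log
            printShardStats(follower, "index2"); // follower index from the log
        }
    }

    private static void printShardStats(RestClient client, String index) throws Exception {
        Request request = new Request("GET", "/" + index + "/_stats");
        request.addParameter("level", "shards"); // ask for per-shard stats, which carry seq_no info
        Response response = client.performRequest(request);
        System.out.println(index + ": " + EntityUtils.toString(response.getEntity()));
    }
}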
[2019-05-28T05:41:03,170][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/-xlwEWDWS-2X-mln0M_wzg] deleting index [2019-05-28T05:41:03,515][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/BAMKyvPaTNWFtt2wEuvEOQ] deleting index [2019-05-28T05:41:03,629][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndex_backlog] after test [2019-05-28T05:41:03,881][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateRemoteConfigsDuringFollowing] before test [2019-05-28T05:41:03,888][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [3]/[1], mappings [doc] [2019-05-28T05:41:04,522][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateRemoteConfigsDuringFollowing] Executing put follow [2019-05-28T05:41:04,763][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0], [index1][2]] ...]). [2019-05-28T05:41:05,138][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:05,222][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:41:05,228][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][2], [index2][1], [index2][0]] ...]). [2019-05-28T05:41:05,271][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:05,271][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:05,301][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][2] Starting to track leader shard [index1][2] [2019-05-28T05:41:05,307][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateRemoteConfigsDuringFollowing] Indexing [672] docs while updateing remote config [2019-05-28T05:41:05,307][INFO ][o.e.t.BackgroundIndexer ] [testUpdateRemoteConfigsDuringFollowing] --> creating 3 indexing threads (auto start: [true], numOfDocs: [672]) [2019-05-28T05:41:05,311][INFO ][o.e.t.BackgroundIndexer ] [[Thread-7]] **** starting indexing thread 1 [2019-05-28T05:41:05,311][INFO ][o.e.t.BackgroundIndexer ] [[Thread-6]] **** starting indexing thread 0 [2019-05-28T05:41:05,311][INFO ][o.e.t.BackgroundIndexer ] [[Thread-8]] **** starting indexing thread 2 [2019-05-28T05:41:05,321][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][2] following leader shard [index1][2], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:05,334][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/VXE4pDSRTuy-Fv8gvuhZrw] update_mapping [doc] [2019-05-28T05:41:05,336][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,340][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,378][INFO ][o.e.c.s.ClusterSettings ] [followerm2] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,378][INFO ][o.e.c.s.ClusterSettings ] [followerd4] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] 
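The testUpdateRemoteConfigsDuringFollowing entries above show the put-follow step ("Executing put follow"), after which followerd3/followerd4 start tracking the leader shards of index1 as index2. Outside the internal test cluster, the equivalent request against the follower cluster would look roughly like the sketch below; the client setup, port, and class name are assumptions, while the index names and the leader_cluster alias are taken from the log.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

// Rough sketch of the CCR put-follow call these log lines correspond to: the follower
// cluster is asked to create index2 as a follower of index1 on the remote cluster
// registered under the alias "leader_cluster". Port is a placeholder.
public class PutFollowSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient followerClient =
                 RestClient.builder(new HttpHost("127.0.0.1", 9201, "http")).build()) {
            Request putFollow = new Request("PUT", "/index2/_ccr/follow");
            putFollow.setJsonEntity(
                "{\n" +
                "  \"remote_cluster\": \"leader_cluster\",\n" +
                "  \"leader_index\": \"index1\"\n" +
                "}");
            followerClient.performRequest(putFollow);
        }
    }
}

Once the request is accepted, each follower shard runs a ShardFollowNodeTask, which is what emits the "following leader shard [index1][n], follower global checkpoint=[-1]" lines above.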
[2019-05-28T05:41:05,378][INFO ][o.e.c.s.ClusterSettings ] [followerd3] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,378][INFO ][o.e.c.s.ClusterSettings ] [followerm1] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,379][INFO ][o.e.c.s.ClusterSettings ] [followerm2] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,379][INFO ][o.e.c.s.ClusterSettings ] [followerd3] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,379][INFO ][o.e.c.s.ClusterSettings ] [followerm1] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,379][INFO ][o.e.c.s.ClusterSettings ] [followerd4] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,427][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,428][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [false] to [true] [2019-05-28T05:41:05,511][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/66R5Q14IS3GUquSal6EHrA] update_mapping [doc] [2019-05-28T05:41:05,809][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleWrite(TestEventHandler.java:154) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleWrite(NioSelector.java:389) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.writeToChannel(NioSelector.java:345) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleQueuedWrites(NioSelector.java:448) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:262) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
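The bursts of o.e.c.s.ClusterSettings lines show every follower node applying the same dynamic update to cluster.remote.leader_cluster.transport.compress while the follow tasks keep running (later in the log the test also repoints cluster.remote.leader_cluster.seeds). A single cluster-update-settings request fans out to all nodes like this; a hedged sketch of such a request is below, with the address and class name as placeholders and the setting key and value taken from the log.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

// Sketch of the dynamic settings update behind the repeated ClusterSettings log lines:
// one request to the follower cluster toggles transport compression for the
// "leader_cluster" remote connection. The seeds key seen further down in the log
// (cluster.remote.leader_cluster.seeds) can be changed the same way.
public class UpdateRemoteClusterSettings {
    public static void main(String[] args) throws Exception {
        try (RestClient followerClient =
                 RestClient.builder(new HttpHost("127.0.0.1", 9201, "http")).build()) { // placeholder address
            Request update = new Request("PUT", "/_cluster/settings");
            update.setJsonEntity(
                "{\n" +
                "  \"persistent\": {\n" +
                "    \"cluster.remote.leader_cluster.transport.compress\": true\n" +
                "  }\n" +
                "}");
            followerClient.performRequest(update);
        }
    }
}

Because this is a dynamic setting applied via the cluster state, each node logs its own "updating [...]" line when the new state is applied, which is why the same message repeats across followerm0, followerm1, followerm2, followerd3 and followerd4.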
[2019-05-28T05:41:06,184][WARN ][o.e.t.n.MockNioTransport ] [leader0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:09,239][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:41:09,290][INFO ][o.e.t.BackgroundIndexer ] [[Thread-6]] **** done indexing thread 0 stop: true numDocsIndexed: 672 [2019-05-28T05:41:09,295][INFO ][o.e.t.BackgroundIndexer ] [[Thread-8]] **** done indexing thread 2 stop: true numDocsIndexed: 672 [2019-05-28T05:41:09,301][INFO ][o.e.t.BackgroundIndexer ] [[Thread-7]] **** done indexing thread 1 stop: true numDocsIndexed: 672 [2019-05-28T05:41:09,306][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,307][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,307][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,307][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerm1] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerd3] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerm1] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerd4] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerm2] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerd3] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerm2] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerd4] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,332][INFO ][o.e.c.s.ClusterSettings ] [followerm1] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,333][INFO ][o.e.c.s.ClusterSettings ] [followerm1] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,333][INFO ][o.e.c.s.ClusterSettings ] [followerm2] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,333][INFO ][o.e.c.s.ClusterSettings ] [followerm2] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,333][INFO ][o.e.c.s.ClusterSettings ] [followerd4] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,333][INFO ][o.e.c.s.ClusterSettings ] [followerd3] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,334][INFO ][o.e.c.s.ClusterSettings ] [followerd4] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,335][INFO ][o.e.c.s.ClusterSettings 
] [followerd3] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,378][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,378][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,379][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.seeds] from [["127.0.0.1:39962"]] to [["127.0.0.1:43808"]] [2019-05-28T05:41:09,379][INFO ][o.e.c.s.ClusterSettings ] [followerm0] updating [cluster.remote.leader_cluster.transport.compress] from [true] to [false] [2019-05-28T05:41:09,445][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [206 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:09,455][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] shard follow task has been stopped [2019-05-28T05:41:09,458][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:09,459][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][2] shard follow task has been stopped [2019-05-28T05:41:09,787][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/VXE4pDSRTuy-Fv8gvuhZrw] deleting index [2019-05-28T05:41:10,378][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/66R5Q14IS3GUquSal6EHrA] deleting index [2019-05-28T05:41:10,519][INFO ][o.e.x.c.IndexFollowingIT ] [testUpdateRemoteConfigsDuringFollowing] after test [2019-05-28T05:41:10,765][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowNonExistentIndex] before test [2019-05-28T05:41:10,772][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [test-leader] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:41:10,986][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[test-leader][0]] ...]). [2019-05-28T05:41:11,105][INFO ][o.e.c.m.MetaDataCreateIndexService] [followerm0] [test-follower] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:41:11,240][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[test-follower][0]] ...]). 
[2019-05-28T05:41:11,279][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowNonExistentIndex] ensure green leader indices [test-leader] [2019-05-28T05:41:11,281][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowNonExistentIndex] ensure green follower indices [test-follower] [2019-05-28T05:41:11,378][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [test-leader/mDCMaAX9R8-llP_Di3vT_w] deleting index [2019-05-28T05:41:11,575][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [test-follower/J1iIsZvPSnu3uwTigMQ87Q] deleting index [2019-05-28T05:41:11,655][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowNonExistentIndex] after test [2019-05-28T05:41:11,902][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] before test [2019-05-28T05:41:11,910][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[1], mappings [doc] [2019-05-28T05:41:12,259][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] Indexing [35] docs as first batch [2019-05-28T05:41:12,430][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:41:12,772][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:12,777][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> asserting <> between index1 and index2 [2019-05-28T05:41:12,779][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index2][0], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=AbFAyDhxTVOl4JSA6vu1oQ] docs [] seq_no_stats SeqNoStats{maxSeqNo=-1, localCheckpoint=-1, globalCheckpoint=-1} [2019-05-28T05:41:12,788][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:12,819][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index1][0], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=u0UAKSQCT5SpOTH_uvEGKg] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='1 seqNo=1 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=2 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=3 primaryTerm=1 version=1 source= {"f":3}}, doc{id='4 seqNo=4 primaryTerm=1 version=1 source= {"f":4}}, doc{id='5 seqNo=5 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=6 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=7 primaryTerm=1 version=1 source= {"f":7}}, doc{id='8 seqNo=8 primaryTerm=1 version=1 source= {"f":8}}, doc{id='9 seqNo=9 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=10 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=11 primaryTerm=1 version=1 source= {"f":11}}, doc{id='12 seqNo=12 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=13 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=14 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=15 primaryTerm=1 version=1 source= {"f":15}}, doc{id='16 seqNo=16 primaryTerm=1 version=1 source= {"f":16}}, doc{id='17 seqNo=17 primaryTerm=1 version=1 source= {"f":17}}, doc{id='18 seqNo=18 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=19 primaryTerm=1 version=1 source= {"f":19}}, doc{id='20 seqNo=20 primaryTerm=1 version=1 source= {"f":20}}, doc{id='21 seqNo=21 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=22 primaryTerm=1 version=1 source= {"f":22}}, 
doc{id='23 seqNo=23 primaryTerm=1 version=1 source= {"f":23}}, doc{id='24 seqNo=24 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=25 primaryTerm=1 version=1 source= {"f":25}}, doc{id='26 seqNo=26 primaryTerm=1 version=1 source= {"f":26}}, doc{id='27 seqNo=27 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=28 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=29 primaryTerm=1 version=1 source= {"f":29}}, doc{id='30 seqNo=30 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=31 primaryTerm=1 version=1 source= {"f":31}}, doc{id='32 seqNo=32 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=33 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=34 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=34, localCheckpoint=34, globalCheckpoint=34} [2019-05-28T05:41:12,819][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index1][0], node[rUlMFBY9QXukTRws0zFidg], [R], s[STARTED], a[id=q79iNRtdRKe8g1MXIJaLvg] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='1 seqNo=1 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=2 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=3 primaryTerm=1 version=1 source= {"f":3}}, doc{id='4 seqNo=4 primaryTerm=1 version=1 source= {"f":4}}, doc{id='5 seqNo=5 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=6 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=7 primaryTerm=1 version=1 source= {"f":7}}, doc{id='8 seqNo=8 primaryTerm=1 version=1 source= {"f":8}}, doc{id='9 seqNo=9 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=10 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=11 primaryTerm=1 version=1 source= {"f":11}}, doc{id='12 seqNo=12 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=13 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=14 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=15 primaryTerm=1 version=1 source= {"f":15}}, doc{id='16 seqNo=16 primaryTerm=1 version=1 source= {"f":16}}, doc{id='17 seqNo=17 primaryTerm=1 version=1 source= {"f":17}}, doc{id='18 seqNo=18 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=19 primaryTerm=1 version=1 source= {"f":19}}, doc{id='20 seqNo=20 primaryTerm=1 version=1 source= {"f":20}}, doc{id='21 seqNo=21 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=22 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=23 primaryTerm=1 version=1 source= {"f":23}}, doc{id='24 seqNo=24 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=25 primaryTerm=1 version=1 source= {"f":25}}, doc{id='26 seqNo=26 primaryTerm=1 version=1 source= {"f":26}}, doc{id='27 seqNo=27 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=28 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=29 primaryTerm=1 version=1 source= {"f":29}}, doc{id='30 seqNo=30 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=31 primaryTerm=1 version=1 source= {"f":31}}, doc{id='32 seqNo=32 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=33 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=34 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=34, localCheckpoint=34, globalCheckpoint=34} [2019-05-28T05:41:12,862][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index2][0], node[XK3Qc8oGSAqdSsvD2_yMPw], [R], recovery_source[peer recovery], s[INITIALIZING], a[id=Ivtu3lJfR2KnDy8VoN1E3Q], unassigned_info[[reason=NEW_INDEX_RESTORED], 
at[2019-05-28T07:41:12.357Z], delayed=false, details[restore_source[_ccr_leader_cluster/_latest_]], allocation_status[no_attempt]] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}] seq_no_stats SeqNoStats{maxSeqNo=34, localCheckpoint=34, globalCheckpoint=34} [2019-05-28T05:41:12,904][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index2][0], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=AbFAyDhxTVOl4JSA6vu1oQ] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='1 seqNo=1 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=2 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=3 primaryTerm=1 version=1 source= {"f":3}}, doc{id='4 seqNo=4 primaryTerm=1 version=1 source= {"f":4}}, doc{id='5 seqNo=5 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=6 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=7 primaryTerm=1 version=1 source= {"f":7}}, doc{id='8 seqNo=8 primaryTerm=1 version=1 source= {"f":8}}, doc{id='9 seqNo=9 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=10 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=11 primaryTerm=1 version=1 source= {"f":11}}, doc{id='12 seqNo=12 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=13 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=14 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=15 primaryTerm=1 version=1 source= {"f":15}}, doc{id='16 seqNo=16 primaryTerm=1 version=1 source= {"f":16}}, doc{id='17 seqNo=17 primaryTerm=1 version=1 source= {"f":17}}, doc{id='18 seqNo=18 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=19 primaryTerm=1 version=1 source= {"f":19}}, doc{id='20 seqNo=20 primaryTerm=1 version=1 source= {"f":20}}, doc{id='21 seqNo=21 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=22 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=23 primaryTerm=1 version=1 source= {"f":23}}, doc{id='24 seqNo=24 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=25 primaryTerm=1 version=1 source= {"f":25}}, doc{id='26 seqNo=26 primaryTerm=1 version=1 source= {"f":26}}, doc{id='27 seqNo=27 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=28 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=29 primaryTerm=1 version=1 source= {"f":29}}, doc{id='30 seqNo=30 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=31 primaryTerm=1 version=1 source= {"f":31}}, doc{id='32 seqNo=32 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=33 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=34 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=34, localCheckpoint=34, globalCheckpoint=34} [2019-05-28T05:41:12,905][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index1][0], node[rUlMFBY9QXukTRws0zFidg], [R], s[STARTED], a[id=q79iNRtdRKe8g1MXIJaLvg] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='1 seqNo=1 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=2 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=3 primaryTerm=1 version=1 source= {"f":3}}, doc{id='4 seqNo=4 primaryTerm=1 version=1 source= {"f":4}}, doc{id='5 seqNo=5 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=6 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=7 primaryTerm=1 version=1 source= {"f":7}}, doc{id='8 seqNo=8 primaryTerm=1 version=1 source= {"f":8}}, doc{id='9 seqNo=9 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=10 primaryTerm=1 
version=1 source= {"f":10}}, doc{id='11 seqNo=11 primaryTerm=1 version=1 source= {"f":11}}, doc{id='12 seqNo=12 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=13 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=14 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=15 primaryTerm=1 version=1 source= {"f":15}}, doc{id='16 seqNo=16 primaryTerm=1 version=1 source= {"f":16}}, doc{id='17 seqNo=17 primaryTerm=1 version=1 source= {"f":17}}, doc{id='18 seqNo=18 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=19 primaryTerm=1 version=1 source= {"f":19}}, doc{id='20 seqNo=20 primaryTerm=1 version=1 source= {"f":20}}, doc{id='21 seqNo=21 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=22 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=23 primaryTerm=1 version=1 source= {"f":23}}, doc{id='24 seqNo=24 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=25 primaryTerm=1 version=1 source= {"f":25}}, doc{id='26 seqNo=26 primaryTerm=1 version=1 source= {"f":26}}, doc{id='27 seqNo=27 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=28 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=29 primaryTerm=1 version=1 source= {"f":29}}, doc{id='30 seqNo=30 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=31 primaryTerm=1 version=1 source= {"f":31}}, doc{id='32 seqNo=32 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=33 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=34 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=34, localCheckpoint=34, globalCheckpoint=34} [2019-05-28T05:41:12,905][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index1][0], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=u0UAKSQCT5SpOTH_uvEGKg] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='1 seqNo=1 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=2 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=3 primaryTerm=1 version=1 source= {"f":3}}, doc{id='4 seqNo=4 primaryTerm=1 version=1 source= {"f":4}}, doc{id='5 seqNo=5 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=6 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=7 primaryTerm=1 version=1 source= {"f":7}}, doc{id='8 seqNo=8 primaryTerm=1 version=1 source= {"f":8}}, doc{id='9 seqNo=9 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=10 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=11 primaryTerm=1 version=1 source= {"f":11}}, doc{id='12 seqNo=12 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=13 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=14 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=15 primaryTerm=1 version=1 source= {"f":15}}, doc{id='16 seqNo=16 primaryTerm=1 version=1 source= {"f":16}}, doc{id='17 seqNo=17 primaryTerm=1 version=1 source= {"f":17}}, doc{id='18 seqNo=18 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=19 primaryTerm=1 version=1 source= {"f":19}}, doc{id='20 seqNo=20 primaryTerm=1 version=1 source= {"f":20}}, doc{id='21 seqNo=21 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=22 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=23 primaryTerm=1 version=1 source= {"f":23}}, doc{id='24 seqNo=24 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=25 primaryTerm=1 version=1 source= {"f":25}}, doc{id='26 seqNo=26 primaryTerm=1 version=1 source= {"f":26}}, doc{id='27 seqNo=27 primaryTerm=1 version=1 source= {"f":27}}, 
doc{id='28 seqNo=28 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=29 primaryTerm=1 version=1 source= {"f":29}}, doc{id='30 seqNo=30 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=31 primaryTerm=1 version=1 source= {"f":31}}, doc{id='32 seqNo=32 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=33 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=34 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=34, localCheckpoint=34, globalCheckpoint=34} [2019-05-28T05:41:12,905][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> asserting seq_no_stats between index1 and index2 [2019-05-28T05:41:12,919][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:41:12,995][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:13,446][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:13,457][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/2jBVx_e2RWSjMu1y2nq2GA] [2019-05-28T05:41:13,495][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[34], mapping version=[1], settings version=[1] [2019-05-28T05:41:13,525][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2] [2019-05-28T05:41:13,566][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=dWbR0DugQmefOIvGqkuwVw, operations=36, maxSeqNoUpdates=70, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.indices.IndexClosedException: closed at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:234) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at 
org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendBulkShardOperationsRequest$10(ShardFollowNodeTask.java:392) [main/:?] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:688) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?]
[2019-05-28T05:41:13,686][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:41:13,689][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:14,242][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:14,246][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] ensure green follower indices [index2] [2019-05-28T05:41:14,290][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[70], mapping version=[1], settings version=[1] [2019-05-28T05:41:14,382][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]).
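The "unexpected error during the primary phase" warning above is an IndexClosedException: the shard-follow task tried to apply its bulk_shard_operations write to index2 while that index was closed (the MetaDataIndexStateService entries show the close starting at 05:41:13,457 and completing at 05:41:13,525). After index2's shards come back and the follow task is restarted, it reports follower global checkpoint 70. A minimal sketch of the same failure mode through a plain client, assuming a hypothetical index name "some-index" and an already-connected Client handle (this is not how the follow task itself issues its requests):

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.indices.IndexClosedException;

import java.util.Collections;

class ClosedIndexWriteExample {
    // Writing to an index after it has been closed fails with IndexClosedException,
    // the same exception the follow task's bulk request hit in the warning above.
    static void demonstrateClosedIndexWrite(Client client) {
        client.admin().indices().prepareClose("some-index").get();
        try {
            client.index(new IndexRequest("some-index")
                    .source(Collections.singletonMap("f", 0))).actionGet();
        } catch (IndexClosedException e) {
            // Expected while the index is closed; reopening it lets writes succeed again.
        }
    }
}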
[2019-05-28T05:41:14,424][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> asserting <> between index1 and index2 [2019-05-28T05:41:14,427][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index2][0], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=AbFAyDhxTVOl4JSA6vu1oQ] docs [doc{id='0 seqNo=35 primaryTerm=1 version=2 source= {"f":0}}, doc{id='2 seqNo=37 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=38 primaryTerm=1 version=2 source= {"f":6}}, doc{id='4 seqNo=39 primaryTerm=1 version=2 source= {"f":8}}, doc{id='5 seqNo=40 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=41 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=42 primaryTerm=1 version=2 source= {"f":14}}, doc{id='8 seqNo=43 primaryTerm=1 version=2 source= {"f":16}}, doc{id='9 seqNo=44 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=45 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=46 primaryTerm=1 version=2 source= {"f":22}}, doc{id='12 seqNo=47 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=48 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=49 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=50 primaryTerm=1 version=2 source= {"f":30}}, doc{id='16 seqNo=51 primaryTerm=1 version=2 source= {"f":32}}, doc{id='17 seqNo=52 primaryTerm=1 version=2 source= {"f":34}}, doc{id='18 seqNo=53 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=54 primaryTerm=1 version=2 source= {"f":38}}, doc{id='20 seqNo=55 primaryTerm=1 version=2 source= {"f":40}}, doc{id='21 seqNo=56 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=57 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=58 primaryTerm=1 version=2 source= {"f":46}}, doc{id='24 seqNo=59 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=60 primaryTerm=1 version=2 source= {"f":50}}, doc{id='26 seqNo=61 primaryTerm=1 version=2 source= {"f":52}}, doc{id='27 seqNo=62 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=63 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=64 primaryTerm=1 version=2 source= {"f":58}}, doc{id='30 seqNo=65 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=66 primaryTerm=1 version=2 source= {"f":62}}, doc{id='32 seqNo=67 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=68 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=69 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=70, localCheckpoint=70, globalCheckpoint=70} [2019-05-28T05:41:14,427][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index2][0], node[XK3Qc8oGSAqdSsvD2_yMPw], [R], s[STARTED], a[id=JVPPPSAAQX2X58IG7PqKTw] docs [doc{id='0 seqNo=35 primaryTerm=1 version=2 source= {"f":0}}, doc{id='2 seqNo=37 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=38 primaryTerm=1 version=2 source= {"f":6}}, doc{id='4 seqNo=39 primaryTerm=1 version=2 source= {"f":8}}, doc{id='5 seqNo=40 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=41 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=42 primaryTerm=1 version=2 source= {"f":14}}, doc{id='8 seqNo=43 primaryTerm=1 version=2 source= {"f":16}}, doc{id='9 seqNo=44 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=45 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=46 primaryTerm=1 version=2 source= {"f":22}}, doc{id='12 seqNo=47 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=48 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=49 
primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=50 primaryTerm=1 version=2 source= {"f":30}}, doc{id='16 seqNo=51 primaryTerm=1 version=2 source= {"f":32}}, doc{id='17 seqNo=52 primaryTerm=1 version=2 source= {"f":34}}, doc{id='18 seqNo=53 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=54 primaryTerm=1 version=2 source= {"f":38}}, doc{id='20 seqNo=55 primaryTerm=1 version=2 source= {"f":40}}, doc{id='21 seqNo=56 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=57 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=58 primaryTerm=1 version=2 source= {"f":46}}, doc{id='24 seqNo=59 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=60 primaryTerm=1 version=2 source= {"f":50}}, doc{id='26 seqNo=61 primaryTerm=1 version=2 source= {"f":52}}, doc{id='27 seqNo=62 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=63 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=64 primaryTerm=1 version=2 source= {"f":58}}, doc{id='30 seqNo=65 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=66 primaryTerm=1 version=2 source= {"f":62}}, doc{id='32 seqNo=67 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=68 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=69 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=70, localCheckpoint=70, globalCheckpoint=70} [2019-05-28T05:41:14,428][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index1][0], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=u0UAKSQCT5SpOTH_uvEGKg] docs [doc{id='0 seqNo=35 primaryTerm=1 version=2 source= {"f":0}}, doc{id='2 seqNo=37 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=38 primaryTerm=1 version=2 source= {"f":6}}, doc{id='4 seqNo=39 primaryTerm=1 version=2 source= {"f":8}}, doc{id='5 seqNo=40 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=41 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=42 primaryTerm=1 version=2 source= {"f":14}}, doc{id='8 seqNo=43 primaryTerm=1 version=2 source= {"f":16}}, doc{id='9 seqNo=44 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=45 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=46 primaryTerm=1 version=2 source= {"f":22}}, doc{id='12 seqNo=47 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=48 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=49 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=50 primaryTerm=1 version=2 source= {"f":30}}, doc{id='16 seqNo=51 primaryTerm=1 version=2 source= {"f":32}}, doc{id='17 seqNo=52 primaryTerm=1 version=2 source= {"f":34}}, doc{id='18 seqNo=53 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=54 primaryTerm=1 version=2 source= {"f":38}}, doc{id='20 seqNo=55 primaryTerm=1 version=2 source= {"f":40}}, doc{id='21 seqNo=56 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=57 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=58 primaryTerm=1 version=2 source= {"f":46}}, doc{id='24 seqNo=59 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=60 primaryTerm=1 version=2 source= {"f":50}}, doc{id='26 seqNo=61 primaryTerm=1 version=2 source= {"f":52}}, doc{id='27 seqNo=62 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=63 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=64 primaryTerm=1 version=2 source= {"f":58}}, doc{id='30 seqNo=65 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=66 primaryTerm=1 version=2 source= {"f":62}}, doc{id='32 seqNo=67 primaryTerm=1 
version=2 source= {"f":64}}, doc{id='33 seqNo=68 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=69 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=70, localCheckpoint=70, globalCheckpoint=70} [2019-05-28T05:41:14,428][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> shard [index1][0], node[rUlMFBY9QXukTRws0zFidg], [R], s[STARTED], a[id=q79iNRtdRKe8g1MXIJaLvg] docs [doc{id='0 seqNo=35 primaryTerm=1 version=2 source= {"f":0}}, doc{id='2 seqNo=37 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=38 primaryTerm=1 version=2 source= {"f":6}}, doc{id='4 seqNo=39 primaryTerm=1 version=2 source= {"f":8}}, doc{id='5 seqNo=40 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=41 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=42 primaryTerm=1 version=2 source= {"f":14}}, doc{id='8 seqNo=43 primaryTerm=1 version=2 source= {"f":16}}, doc{id='9 seqNo=44 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=45 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=46 primaryTerm=1 version=2 source= {"f":22}}, doc{id='12 seqNo=47 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=48 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=49 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=50 primaryTerm=1 version=2 source= {"f":30}}, doc{id='16 seqNo=51 primaryTerm=1 version=2 source= {"f":32}}, doc{id='17 seqNo=52 primaryTerm=1 version=2 source= {"f":34}}, doc{id='18 seqNo=53 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=54 primaryTerm=1 version=2 source= {"f":38}}, doc{id='20 seqNo=55 primaryTerm=1 version=2 source= {"f":40}}, doc{id='21 seqNo=56 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=57 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=58 primaryTerm=1 version=2 source= {"f":46}}, doc{id='24 seqNo=59 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=60 primaryTerm=1 version=2 source= {"f":50}}, doc{id='26 seqNo=61 primaryTerm=1 version=2 source= {"f":52}}, doc{id='27 seqNo=62 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=63 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=64 primaryTerm=1 version=2 source= {"f":58}}, doc{id='30 seqNo=65 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=66 primaryTerm=1 version=2 source= {"f":62}}, doc{id='32 seqNo=67 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=68 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=69 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=70, localCheckpoint=70, globalCheckpoint=70} [2019-05-28T05:41:14,429][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] --> asserting seq_no_stats between index1 and index2 [2019-05-28T05:41:14,492][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:14,508][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/7qoYGRFxSZWgmg9CmVcD-w] deleting index [2019-05-28T05:41:14,772][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/2jBVx_e2RWSjMu1y2nq2GA] deleting index [2019-05-28T05:41:14,853][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexDoesNotFallBehind] after test [2019-05-28T05:41:15,066][INFO ][o.e.x.c.IndexFollowingIT ] [testLeaderIndexRed] before test [2019-05-28T05:41:15,100][INFO ][o.e.c.s.ClusterSettings ] [leader1] updating [cluster.routing.allocation.enable] from [all] to [none] [2019-05-28T05:41:15,118][INFO ][o.e.c.s.ClusterSettings ] [leader0] 
updating [cluster.routing.allocation.enable] from [all] to [none] [2019-05-28T05:41:15,139][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [] [2019-05-28T05:41:15,140][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [RED] (reason: [index [index1] created]). [2019-05-28T05:41:15,318][INFO ][o.e.c.s.ClusterSettings ] [leader1] updating [cluster.routing.allocation.enable] from [none] to [all] [2019-05-28T05:41:15,335][INFO ][o.e.c.s.ClusterSettings ] [leader0] updating [cluster.routing.allocation.enable] from [none] to [all] [2019-05-28T05:41:15,540][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/hxYbHp6oQzOV0k05cbAbJg] deleting index [2019-05-28T05:41:15,729][INFO ][o.e.x.c.IndexFollowingIT ] [testLeaderIndexRed] after test [2019-05-28T05:41:15,932][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] before test [2019-05-28T05:41:15,938][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [2]/[1], mappings [doc] [2019-05-28T05:41:16,553][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:41:16,648][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] Indexing [35] docs as first batch [2019-05-28T05:41:16,658][WARN ][o.e.t.n.MockNioTransport ] [leader1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
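The testLeaderIndexRed sequence a few entries above (cluster.routing.allocation.enable flipped from [all] to [none], index1 created with one primary and no replicas so health drops to RED, then allocation re-enabled) can be reproduced outside the test framework. A minimal sketch, assuming a single local node on http://localhost:9200 and the Python elasticsearch client rather than the Java integration-test harness used for this log:

    from elasticsearch import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])  # assumed local node address

    # Disable shard allocation, mirroring the [all] -> [none] transition logged above.
    es.cluster.put_settings(body={"transient": {"cluster.routing.allocation.enable": "none"}})

    # With allocation disabled the new primary cannot be assigned, so the index stays RED.
    es.indices.create(index="index1", body={"settings": {"number_of_shards": 1, "number_of_replicas": 0}})
    print(es.cluster.health(index="index1")["status"])  # expected to print "red"

    # Re-enable allocation, mirroring the [none] -> [all] transition.
    es.cluster.put_settings(body={"transient": {"cluster.routing.allocation.enable": "all"}})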
[2019-05-28T05:41:17,099][WARN ][o.e.t.n.MockNioTransport ] [followerm1] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleWrite(TestEventHandler.java:154) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleWrite(NioSelector.java:389) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.writeToChannel(NioSelector.java:345) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleQueuedWrites(NioSelector.java:448) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:262) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:17,283][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:41:17,372][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:41:17,416][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:17,446][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:17,450][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> asserting <> between index1 and index2 [2019-05-28T05:41:17,450][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][0], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=i2gIeuStRuSl_pkUdZEQdg] docs [] seq_no_stats SeqNoStats{maxSeqNo=-1, localCheckpoint=-1, globalCheckpoint=-1} [2019-05-28T05:41:17,459][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:17,493][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][1], node[imikiXyHQzWPRqBx1bSawA], [R], s[STARTED], a[id=8gKCmjiORE2x49yXCnbBRg] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='4 seqNo=1 primaryTerm=1 version=1 source= {"f":4}}, doc{id='8 seqNo=2 primaryTerm=1 version=1 source= {"f":8}}, doc{id='12 seqNo=3 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=4 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=5 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=6 primaryTerm=1 version=1 source= {"f":15}}, doc{id='17 seqNo=7 primaryTerm=1 version=1 source= {"f":17}}, doc{id='20 seqNo=8 primaryTerm=1 version=1 source= {"f":20}}, doc{id='24 seqNo=9 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=10 primaryTerm=1 version=1 source= 
{"f":25}}, doc{id='27 seqNo=11 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=12 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=13 primaryTerm=1 version=1 source= {"f":29}}, doc{id='32 seqNo=14 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=15 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=16 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=16, localCheckpoint=16, globalCheckpoint=16} [2019-05-28T05:41:17,525][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][0], node[XK3Qc8oGSAqdSsvD2_yMPw], [R], s[STARTED], a[id=IgqlTELcSL-dwLFr-G4t0A] docs [doc{id='1 seqNo=0 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=1 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=2 primaryTerm=1 version=1 source= {"f":3}}, doc{id='5 seqNo=3 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=4 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=5 primaryTerm=1 version=1 source= {"f":7}}, doc{id='9 seqNo=6 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=7 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=8 primaryTerm=1 version=1 source= {"f":11}}, doc{id='16 seqNo=9 primaryTerm=1 version=1 source= {"f":16}}, doc{id='18 seqNo=10 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=11 primaryTerm=1 version=1 source= {"f":19}}, doc{id='21 seqNo=12 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=13 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=14 primaryTerm=1 version=1 source= {"f":23}}, doc{id='26 seqNo=15 primaryTerm=1 version=1 source= {"f":26}}, doc{id='30 seqNo=16 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=17 primaryTerm=1 version=1 source= {"f":31}}] seq_no_stats SeqNoStats{maxSeqNo=17, localCheckpoint=17, globalCheckpoint=17} [2019-05-28T05:41:17,558][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][1], node[XK3Qc8oGSAqdSsvD2_yMPw], [P], s[STARTED], a[id=nR0kHFoRR_adR0soxSmdGQ] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='4 seqNo=1 primaryTerm=1 version=1 source= {"f":4}}, doc{id='8 seqNo=2 primaryTerm=1 version=1 source= {"f":8}}, doc{id='12 seqNo=3 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=4 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=5 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=6 primaryTerm=1 version=1 source= {"f":15}}, doc{id='17 seqNo=7 primaryTerm=1 version=1 source= {"f":17}}, doc{id='20 seqNo=8 primaryTerm=1 version=1 source= {"f":20}}, doc{id='24 seqNo=9 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=10 primaryTerm=1 version=1 source= {"f":25}}, doc{id='27 seqNo=11 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=12 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=13 primaryTerm=1 version=1 source= {"f":29}}, doc{id='32 seqNo=14 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=15 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=16 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=16, localCheckpoint=16, globalCheckpoint=16} [2019-05-28T05:41:17,560][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][1], node[rUlMFBY9QXukTRws0zFidg], [P], s[STARTED], a[id=fzKX9McvRV2whrGSTnMedw] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='4 seqNo=1 primaryTerm=1 version=1 source= {"f":4}}, doc{id='8 seqNo=2 primaryTerm=1 version=1 source= {"f":8}}, doc{id='12 seqNo=3 
primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=4 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=5 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=6 primaryTerm=1 version=1 source= {"f":15}}, doc{id='17 seqNo=7 primaryTerm=1 version=1 source= {"f":17}}, doc{id='20 seqNo=8 primaryTerm=1 version=1 source= {"f":20}}, doc{id='24 seqNo=9 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=10 primaryTerm=1 version=1 source= {"f":25}}, doc{id='27 seqNo=11 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=12 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=13 primaryTerm=1 version=1 source= {"f":29}}, doc{id='32 seqNo=14 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=15 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=16 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=16, localCheckpoint=16, globalCheckpoint=16} [2019-05-28T05:41:17,560][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][1], node[9HWb90BFR1eAyo6kqv0ZYA], [R], s[STARTED], a[id=Fc0G9EruSrenWQ3HJBZ2xg] docs [doc{id='0 seqNo=0 primaryTerm=1 version=1 source= {"f":0}}, doc{id='4 seqNo=1 primaryTerm=1 version=1 source= {"f":4}}, doc{id='8 seqNo=2 primaryTerm=1 version=1 source= {"f":8}}, doc{id='12 seqNo=3 primaryTerm=1 version=1 source= {"f":12}}, doc{id='13 seqNo=4 primaryTerm=1 version=1 source= {"f":13}}, doc{id='14 seqNo=5 primaryTerm=1 version=1 source= {"f":14}}, doc{id='15 seqNo=6 primaryTerm=1 version=1 source= {"f":15}}, doc{id='17 seqNo=7 primaryTerm=1 version=1 source= {"f":17}}, doc{id='20 seqNo=8 primaryTerm=1 version=1 source= {"f":20}}, doc{id='24 seqNo=9 primaryTerm=1 version=1 source= {"f":24}}, doc{id='25 seqNo=10 primaryTerm=1 version=1 source= {"f":25}}, doc{id='27 seqNo=11 primaryTerm=1 version=1 source= {"f":27}}, doc{id='28 seqNo=12 primaryTerm=1 version=1 source= {"f":28}}, doc{id='29 seqNo=13 primaryTerm=1 version=1 source= {"f":29}}, doc{id='32 seqNo=14 primaryTerm=1 version=1 source= {"f":32}}, doc{id='33 seqNo=15 primaryTerm=1 version=1 source= {"f":33}}, doc{id='34 seqNo=16 primaryTerm=1 version=1 source= {"f":34}}] seq_no_stats SeqNoStats{maxSeqNo=16, localCheckpoint=16, globalCheckpoint=16} [2019-05-28T05:41:17,560][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][0], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=QgzRgA1MQyuEJaiJitsbEA] docs [doc{id='1 seqNo=0 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=1 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=2 primaryTerm=1 version=1 source= {"f":3}}, doc{id='5 seqNo=3 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=4 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=5 primaryTerm=1 version=1 source= {"f":7}}, doc{id='9 seqNo=6 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=7 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=8 primaryTerm=1 version=1 source= {"f":11}}, doc{id='16 seqNo=9 primaryTerm=1 version=1 source= {"f":16}}, doc{id='18 seqNo=10 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=11 primaryTerm=1 version=1 source= {"f":19}}, doc{id='21 seqNo=12 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=13 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=14 primaryTerm=1 version=1 source= {"f":23}}, doc{id='26 seqNo=15 primaryTerm=1 version=1 source= {"f":26}}, doc{id='30 seqNo=16 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=17 primaryTerm=1 version=1 source= {"f":31}}] 
seq_no_stats SeqNoStats{maxSeqNo=17, localCheckpoint=17, globalCheckpoint=17} [2019-05-28T05:41:17,560][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][0], node[rUlMFBY9QXukTRws0zFidg], [R], s[STARTED], a[id=sWqdG213Twy4gh6Q0GdTWQ] docs [doc{id='1 seqNo=0 primaryTerm=1 version=1 source= {"f":1}}, doc{id='2 seqNo=1 primaryTerm=1 version=1 source= {"f":2}}, doc{id='3 seqNo=2 primaryTerm=1 version=1 source= {"f":3}}, doc{id='5 seqNo=3 primaryTerm=1 version=1 source= {"f":5}}, doc{id='6 seqNo=4 primaryTerm=1 version=1 source= {"f":6}}, doc{id='7 seqNo=5 primaryTerm=1 version=1 source= {"f":7}}, doc{id='9 seqNo=6 primaryTerm=1 version=1 source= {"f":9}}, doc{id='10 seqNo=7 primaryTerm=1 version=1 source= {"f":10}}, doc{id='11 seqNo=8 primaryTerm=1 version=1 source= {"f":11}}, doc{id='16 seqNo=9 primaryTerm=1 version=1 source= {"f":16}}, doc{id='18 seqNo=10 primaryTerm=1 version=1 source= {"f":18}}, doc{id='19 seqNo=11 primaryTerm=1 version=1 source= {"f":19}}, doc{id='21 seqNo=12 primaryTerm=1 version=1 source= {"f":21}}, doc{id='22 seqNo=13 primaryTerm=1 version=1 source= {"f":22}}, doc{id='23 seqNo=14 primaryTerm=1 version=1 source= {"f":23}}, doc{id='26 seqNo=15 primaryTerm=1 version=1 source= {"f":26}}, doc{id='30 seqNo=16 primaryTerm=1 version=1 source= {"f":30}}, doc{id='31 seqNo=17 primaryTerm=1 version=1 source= {"f":31}}] seq_no_stats SeqNoStats{maxSeqNo=17, localCheckpoint=17, globalCheckpoint=17} [2019-05-28T05:41:17,561][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> asserting seq_no_stats between index1 and index2 [2019-05-28T05:41:17,611][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
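The "--> asserting seq_no_stats between index1 and index2" entries compare each shard's SeqNoStats (maxSeqNo, localCheckpoint, globalCheckpoint) on the leader index with the corresponding follower shard. A rough equivalent of that check, assuming the Python client, a single client that can see both indices (in the test they live in separate leader and follower clusters), and the shard-level layout of the 7.x _stats response:

    from elasticsearch import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])  # assumed address; adjust per cluster

    def primary_seq_no_stats(index):
        # level="shards" exposes per-shard-copy stats; the "seq_no" section layout
        # (max_seq_no, local_checkpoint, global_checkpoint) is assumed from 7.x output.
        shards = es.indices.stats(index=index, level="shards")["indices"][index]["shards"]
        return {
            shard: next(copy["seq_no"] for copy in copies if copy["routing"]["primary"])
            for shard, copies in shards.items()
        }

    leader = primary_seq_no_stats("index1")
    follower = primary_seq_no_stats("index2")
    for shard, stats in leader.items():
        # Mirrors the per-shard comparison logged above.
        assert stats["global_checkpoint"] == follower[shard]["global_checkpoint"], shard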
[2019-05-28T05:41:17,662][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][1] shard follow task has been stopped [2019-05-28T05:41:17,708][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:17,712][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][0] shard follow task has been stopped [2019-05-28T05:41:18,173][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:18,209][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:41:18,221][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[17], mapping version=[1], settings version=[1] [2019-05-28T05:41:18,224][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] following leader shard [index1][1], follower global checkpoint=[16], mapping version=[1], settings version=[1] [2019-05-28T05:41:18,226][WARN ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] shard follow task encounter non-retryable error org.elasticsearch.transport.RemoteTransportException: [leader0][127.0.0.1:43808][indices:data/read/xpack/ccr/shard_changes] Caused by: org.elasticsearch.transport.RemoteTransportException: [leader1][127.0.0.1:39962][indices:data/read/xpack/ccr/shard_changes[s]] Caused by: org.elasticsearch.ResourceNotFoundException: Operations are no longer available for replicating. Maybe increase the retention setting [index.soft_deletes.retention.operations]? at org.elasticsearch.xpack.ccr.action.ShardChangesAction.getOperations(ShardChangesAction.java:534) ~[main/:?] at org.elasticsearch.xpack.ccr.action.ShardChangesAction$TransportAction.shardOperation(ShardChangesAction.java:349) ~[main/:?] at org.elasticsearch.xpack.ccr.action.ShardChangesAction$TransportAction.shardOperation(ShardChangesAction.java:328) ~[main/:?] 
at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$1.doRun(TransportSingleShardAction.java:113) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.TimedRunnable.doRun(TimedRunnable.java:44) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:758) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.lang.IllegalStateException: Not all operations between from_seqno [18] and to_seqno [36] found; expected seqno [18]; found [Index{id='2', type='doc', seqNo=19, primaryTerm=1, version=2, autoGeneratedIdTimestamp=-1}] at org.elasticsearch.index.engine.LuceneChangesSnapshot.rangeCheck(LuceneChangesSnapshot.java:155) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.index.engine.LuceneChangesSnapshot.next(LuceneChangesSnapshot.java:138) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardChangesAction.getOperations(ShardChangesAction.java:522) ~[main/:?] at org.elasticsearch.xpack.ccr.action.ShardChangesAction$TransportAction.shardOperation(ShardChangesAction.java:349) ~[main/:?] at org.elasticsearch.xpack.ccr.action.ShardChangesAction$TransportAction.shardOperation(ShardChangesAction.java:328) ~[main/:?] at org.elasticsearch.action.support.single.shard.TransportSingleShardAction$1.doRun(TransportSingleShardAction.java:113) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.TimedRunnable.doRun(TimedRunnable.java:44) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:758) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?] at java.lang.Thread.run(Thread.java:835) ~[?:?] [2019-05-28T05:41:18,229][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] closing indices [index2/gMZQRnHfRoOD5ED1FV8e5Q] [2019-05-28T05:41:18,425][INFO ][o.e.c.m.MetaDataIndexStateService] [followerm0] completed closing of indices [index2] [2019-05-28T05:41:18,611][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] shard follow task has been stopped [2019-05-28T05:41:18,613][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[index2][0], [index2][1]] ...]). 
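The non-retryable error above ("Operations are no longer available for replicating. Maybe increase the retention setting [index.soft_deletes.retention.operations]?") means the follower requested operations the leader had already discarded, which is exactly the situation testIndexFallBehind provokes. Outside a test, the setting named in the message could be raised on the leader index; a minimal sketch, assuming the Python client, an assumed leader address, and an arbitrary value of 1024:

    from elasticsearch import Elasticsearch

    leader = Elasticsearch(["http://localhost:9200"])  # assumed leader cluster address

    # Retain more soft-deleted operations so a lagging follower can still fetch them;
    # the setting name is taken verbatim from the exception message, the value is illustrative.
    leader.indices.put_settings(
        index="index1",
        body={"index": {"soft_deletes": {"retention": {"operations": 1024}}}},
    )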
[2019-05-28T05:41:18,788][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][1]] ...]). [2019-05-28T05:41:19,404][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:19,458][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[36], mapping version=[1], settings version=[1] [2019-05-28T05:41:19,505][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:41:19,509][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] ensure green follower indices [index2] [2019-05-28T05:41:19,510][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:41:19,551][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> asserting <> between index1 and index2 [2019-05-28T05:41:19,552][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][0], node[XK3Qc8oGSAqdSsvD2_yMPw], [R], s[STARTED], a[id=t2BIaHAbTSCe4CRqQuPOVw] docs [doc{id='2 seqNo=19 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=20 primaryTerm=1 version=2 source= {"f":6}}, doc{id='5 seqNo=21 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=22 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=23 primaryTerm=1 version=2 source= {"f":14}}, doc{id='9 seqNo=24 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=25 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=26 primaryTerm=1 version=2 source= {"f":22}}, doc{id='16 seqNo=27 primaryTerm=1 version=2 source= {"f":32}}, doc{id='18 seqNo=28 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=29 primaryTerm=1 version=2 source= {"f":38}}, doc{id='21 seqNo=30 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=31 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=32 primaryTerm=1 version=2 source= {"f":46}}, doc{id='26 seqNo=33 primaryTerm=1 version=2 source= {"f":52}}, doc{id='30 seqNo=34 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=35 primaryTerm=1 version=2 source= {"f":62}}] seq_no_stats SeqNoStats{maxSeqNo=36, localCheckpoint=36, globalCheckpoint=36} [2019-05-28T05:41:19,552][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] following leader shard [index1][1], follower global checkpoint=[33], mapping version=[1], settings version=[1] [2019-05-28T05:41:19,552][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][1], node[XK3Qc8oGSAqdSsvD2_yMPw], [P], s[STARTED], a[id=nR0kHFoRR_adR0soxSmdGQ] docs [doc{id='0 seqNo=17 primaryTerm=1 version=2 source= {"f":0}}, doc{id='4 seqNo=18 primaryTerm=1 version=2 source= {"f":8}}, doc{id='8 seqNo=19 primaryTerm=1 version=2 source= {"f":16}}, doc{id='12 seqNo=20 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=21 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=22 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=23 primaryTerm=1 version=2 source= {"f":30}}, doc{id='17 seqNo=24 primaryTerm=1 version=2 source= {"f":34}}, doc{id='20 seqNo=25 primaryTerm=1 version=2 source= {"f":40}}, doc{id='24 seqNo=26 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=27 primaryTerm=1 version=2 source= {"f":50}}, doc{id='27 seqNo=28 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 
seqNo=29 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=30 primaryTerm=1 version=2 source= {"f":58}}, doc{id='32 seqNo=31 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=32 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=33 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=33, localCheckpoint=33, globalCheckpoint=33} [2019-05-28T05:41:19,553][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][1], node[imikiXyHQzWPRqBx1bSawA], [R], s[STARTED], a[id=73rUla1HS3-fOZe44lSDVA] docs [doc{id='0 seqNo=17 primaryTerm=1 version=2 source= {"f":0}}, doc{id='4 seqNo=18 primaryTerm=1 version=2 source= {"f":8}}, doc{id='8 seqNo=19 primaryTerm=1 version=2 source= {"f":16}}, doc{id='12 seqNo=20 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=21 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=22 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=23 primaryTerm=1 version=2 source= {"f":30}}, doc{id='17 seqNo=24 primaryTerm=1 version=2 source= {"f":34}}, doc{id='20 seqNo=25 primaryTerm=1 version=2 source= {"f":40}}, doc{id='24 seqNo=26 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=27 primaryTerm=1 version=2 source= {"f":50}}, doc{id='27 seqNo=28 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=29 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=30 primaryTerm=1 version=2 source= {"f":58}}, doc{id='32 seqNo=31 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=32 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=33 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=33, localCheckpoint=33, globalCheckpoint=33} [2019-05-28T05:41:19,553][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index2][0], node[imikiXyHQzWPRqBx1bSawA], [P], s[STARTED], a[id=i2gIeuStRuSl_pkUdZEQdg] docs [doc{id='2 seqNo=19 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=20 primaryTerm=1 version=2 source= {"f":6}}, doc{id='5 seqNo=21 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=22 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=23 primaryTerm=1 version=2 source= {"f":14}}, doc{id='9 seqNo=24 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=25 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=26 primaryTerm=1 version=2 source= {"f":22}}, doc{id='16 seqNo=27 primaryTerm=1 version=2 source= {"f":32}}, doc{id='18 seqNo=28 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=29 primaryTerm=1 version=2 source= {"f":38}}, doc{id='21 seqNo=30 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=31 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=32 primaryTerm=1 version=2 source= {"f":46}}, doc{id='26 seqNo=33 primaryTerm=1 version=2 source= {"f":52}}, doc{id='30 seqNo=34 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=35 primaryTerm=1 version=2 source= {"f":62}}] seq_no_stats SeqNoStats{maxSeqNo=36, localCheckpoint=36, globalCheckpoint=36} [2019-05-28T05:41:19,555][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][0], node[9HWb90BFR1eAyo6kqv0ZYA], [P], s[STARTED], a[id=QgzRgA1MQyuEJaiJitsbEA] docs [doc{id='2 seqNo=19 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=20 primaryTerm=1 version=2 source= {"f":6}}, doc{id='5 seqNo=21 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=22 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=23 primaryTerm=1 version=2 source= {"f":14}}, doc{id='9 seqNo=24 
primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=25 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=26 primaryTerm=1 version=2 source= {"f":22}}, doc{id='16 seqNo=27 primaryTerm=1 version=2 source= {"f":32}}, doc{id='18 seqNo=28 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=29 primaryTerm=1 version=2 source= {"f":38}}, doc{id='21 seqNo=30 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=31 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=32 primaryTerm=1 version=2 source= {"f":46}}, doc{id='26 seqNo=33 primaryTerm=1 version=2 source= {"f":52}}, doc{id='30 seqNo=34 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=35 primaryTerm=1 version=2 source= {"f":62}}] seq_no_stats SeqNoStats{maxSeqNo=36, localCheckpoint=36, globalCheckpoint=36} [2019-05-28T05:41:19,555][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][0], node[rUlMFBY9QXukTRws0zFidg], [R], s[STARTED], a[id=sWqdG213Twy4gh6Q0GdTWQ] docs [doc{id='2 seqNo=19 primaryTerm=1 version=2 source= {"f":4}}, doc{id='3 seqNo=20 primaryTerm=1 version=2 source= {"f":6}}, doc{id='5 seqNo=21 primaryTerm=1 version=2 source= {"f":10}}, doc{id='6 seqNo=22 primaryTerm=1 version=2 source= {"f":12}}, doc{id='7 seqNo=23 primaryTerm=1 version=2 source= {"f":14}}, doc{id='9 seqNo=24 primaryTerm=1 version=2 source= {"f":18}}, doc{id='10 seqNo=25 primaryTerm=1 version=2 source= {"f":20}}, doc{id='11 seqNo=26 primaryTerm=1 version=2 source= {"f":22}}, doc{id='16 seqNo=27 primaryTerm=1 version=2 source= {"f":32}}, doc{id='18 seqNo=28 primaryTerm=1 version=2 source= {"f":36}}, doc{id='19 seqNo=29 primaryTerm=1 version=2 source= {"f":38}}, doc{id='21 seqNo=30 primaryTerm=1 version=2 source= {"f":42}}, doc{id='22 seqNo=31 primaryTerm=1 version=2 source= {"f":44}}, doc{id='23 seqNo=32 primaryTerm=1 version=2 source= {"f":46}}, doc{id='26 seqNo=33 primaryTerm=1 version=2 source= {"f":52}}, doc{id='30 seqNo=34 primaryTerm=1 version=2 source= {"f":60}}, doc{id='31 seqNo=35 primaryTerm=1 version=2 source= {"f":62}}] seq_no_stats SeqNoStats{maxSeqNo=36, localCheckpoint=36, globalCheckpoint=36} [2019-05-28T05:41:19,555][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> shard [index1][1], node[9HWb90BFR1eAyo6kqv0ZYA], [R], s[STARTED], a[id=Fc0G9EruSrenWQ3HJBZ2xg] docs [doc{id='0 seqNo=17 primaryTerm=1 version=2 source= {"f":0}}, doc{id='4 seqNo=18 primaryTerm=1 version=2 source= {"f":8}}, doc{id='8 seqNo=19 primaryTerm=1 version=2 source= {"f":16}}, doc{id='12 seqNo=20 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=21 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=22 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=23 primaryTerm=1 version=2 source= {"f":30}}, doc{id='17 seqNo=24 primaryTerm=1 version=2 source= {"f":34}}, doc{id='20 seqNo=25 primaryTerm=1 version=2 source= {"f":40}}, doc{id='24 seqNo=26 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=27 primaryTerm=1 version=2 source= {"f":50}}, doc{id='27 seqNo=28 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=29 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=30 primaryTerm=1 version=2 source= {"f":58}}, doc{id='32 seqNo=31 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=32 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=33 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=33, localCheckpoint=33, globalCheckpoint=33} [2019-05-28T05:41:19,556][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] 
--> shard [index1][1], node[rUlMFBY9QXukTRws0zFidg], [P], s[STARTED], a[id=fzKX9McvRV2whrGSTnMedw] docs [doc{id='0 seqNo=17 primaryTerm=1 version=2 source= {"f":0}}, doc{id='4 seqNo=18 primaryTerm=1 version=2 source= {"f":8}}, doc{id='8 seqNo=19 primaryTerm=1 version=2 source= {"f":16}}, doc{id='12 seqNo=20 primaryTerm=1 version=2 source= {"f":24}}, doc{id='13 seqNo=21 primaryTerm=1 version=2 source= {"f":26}}, doc{id='14 seqNo=22 primaryTerm=1 version=2 source= {"f":28}}, doc{id='15 seqNo=23 primaryTerm=1 version=2 source= {"f":30}}, doc{id='17 seqNo=24 primaryTerm=1 version=2 source= {"f":34}}, doc{id='20 seqNo=25 primaryTerm=1 version=2 source= {"f":40}}, doc{id='24 seqNo=26 primaryTerm=1 version=2 source= {"f":48}}, doc{id='25 seqNo=27 primaryTerm=1 version=2 source= {"f":50}}, doc{id='27 seqNo=28 primaryTerm=1 version=2 source= {"f":54}}, doc{id='28 seqNo=29 primaryTerm=1 version=2 source= {"f":56}}, doc{id='29 seqNo=30 primaryTerm=1 version=2 source= {"f":58}}, doc{id='32 seqNo=31 primaryTerm=1 version=2 source= {"f":64}}, doc{id='33 seqNo=32 primaryTerm=1 version=2 source= {"f":66}}, doc{id='34 seqNo=33 primaryTerm=1 version=2 source= {"f":68}}] seq_no_stats SeqNoStats{maxSeqNo=33, localCheckpoint=33, globalCheckpoint=33} [2019-05-28T05:41:19,556][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] --> asserting seq_no_stats between index1 and index2 [2019-05-28T05:41:19,563][WARN ][o.e.t.n.MockNioTransport ] [followerd4] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleWrite(TestEventHandler.java:154) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleWrite(NioSelector.java:389) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.writeToChannel(NioSelector.java:345) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleQueuedWrites(NioSelector.java:448) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:262) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] 
[2019-05-28T05:41:19,618][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [204 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:19,663][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] shard follow task has been stopped [2019-05-28T05:41:19,666][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:19,681][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/czBDkbeqSAmo26DbCKTITQ] deleting index [2019-05-28T05:41:20,069][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/gMZQRnHfRoOD5ED1FV8e5Q] deleting index [2019-05-28T05:41:20,177][INFO ][o.e.x.c.IndexFollowingIT ] [testIndexFallBehind] after test [2019-05-28T05:41:20,598][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithoutWaitForComplete] before test [2019-05-28T05:41:20,605][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [2]/[1], mappings [doc] [2019-05-28T05:41:21,185][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithoutWaitForComplete] Indexing [63] docs as first batch [2019-05-28T05:41:21,199][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). 
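testFollowIndexWithoutWaitForComplete (whose leader setup is logged just above) starts following without waiting for the follower shard copies to become active. A hedged sketch of the equivalent request through the Python client's ccr namespace, assuming the remote cluster is registered under the alias leader_cluster, the follower cluster listens on an assumed port, and that wait_for_active_shards=0 is what "without wait for complete" corresponds to:

    from elasticsearch import Elasticsearch

    follower = Elasticsearch(["http://localhost:9201"])  # assumed follower cluster address

    # Start following index1 as index2; wait_for_active_shards=0 lets the request
    # return before the follower shard copies have started.
    follower.ccr.follow(
        index="index2",
        body={"remote_cluster": "leader_cluster", "leader_index": "index1"},
        wait_for_active_shards=0,
    )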
[2019-05-28T05:41:21,493][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithoutWaitForComplete] ensure green follower indices [index2] [2019-05-28T05:41:21,512][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:21,919][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:21,992][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). [2019-05-28T05:41:22,046][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:22,077][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd4] [index2][1] Starting to track leader shard [index1][1] [2019-05-28T05:41:22,093][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] following leader shard [index1][1], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:22,389][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd4] [index2][1] shard follow task has been stopped [2019-05-28T05:41:22,423][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:22,491][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/gadgKlbPSmevOERupcxU5w] deleting index [2019-05-28T05:41:22,849][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/hWGshZ-wTSypyNFEG1i5gw] deleting index [2019-05-28T05:41:22,955][INFO ][o.e.x.c.IndexFollowingIT ] [testFollowIndexWithoutWaitForComplete] after test [2019-05-28T05:41:23,334][INFO ][o.e.x.c.IndexFollowingIT ] [testUnknownClusterAlias] before test [2019-05-28T05:41:23,342][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:41:23,555][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). 
[2019-05-28T05:41:23,659][INFO ][o.e.x.c.IndexFollowingIT ] [testUnknownClusterAlias] ensure green leader indices [index1] [2019-05-28T05:41:23,696][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/QuaUJC4UQqWDAJ6mxwyz4A] deleting index [2019-05-28T05:41:23,903][INFO ][o.e.x.c.IndexFollowingIT ] [testUnknownClusterAlias] after test [2019-05-28T05:41:24,327][INFO ][o.e.x.c.IndexFollowingIT ] [testNoMappingDefined] before test [2019-05-28T05:41:24,331][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [] [2019-05-28T05:41:24,545][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:41:24,646][INFO ][o.e.x.c.IndexFollowingIT ] [testNoMappingDefined] ensure green leader indices [index1] [2019-05-28T05:41:24,716][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:24,716][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:24,897][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). 
[2019-05-28T05:41:25,023][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:25,033][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:25,037][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/bLVeUpWrS1mNz-I_kOhRfw] create_mapping [doc] [2019-05-28T05:41:25,116][WARN ][o.e.t.n.MockNioTransport ] [followerm0] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:25,120][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/ruhvjjVPSbq_xUkAcGdejw] create_mapping [doc] [2019-05-28T05:41:26,231][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:26,295][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/bLVeUpWrS1mNz-I_kOhRfw] deleting index [2019-05-28T05:41:26,492][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/ruhvjjVPSbq_xUkAcGdejw] deleting index [2019-05-28T05:41:26,572][INFO ][o.e.x.c.IndexFollowingIT ] [testNoMappingDefined] after test [2019-05-28T05:41:26,941][INFO ][o.e.x.c.IndexFollowingIT ] [testLeaderIndexSettingNotPercolatedToFollower] before test [2019-05-28T05:41:26,947][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [leader] creating index, cause [api], templates [], shards [1]/[0], mappings [doc] [2019-05-28T05:41:27,172][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[leader][0]] ...]). [2019-05-28T05:41:27,548][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[follower][0]] ...]). [2019-05-28T05:41:27,681][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [follower][0] Starting to track leader shard [leader][0] [2019-05-28T05:41:27,697][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [follower][0] following leader shard [leader][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:28,757][INFO ][o.e.c.m.MetaDataUpdateSettingsService] [leader0] updating number_of_replicas to [1] for indices [leader] [2019-05-28T05:41:29,159][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[leader][0]] ...]). 
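testLeaderIndexSettingNotPercolatedToFollower (above) bumps number_of_replicas on the leader index and expects the follower index to keep its own value rather than replicate the change. A small sketch of that check, assuming separate leader and follower clusters on assumed addresses and the Python client:

    from elasticsearch import Elasticsearch

    leader = Elasticsearch(["http://localhost:9200"])    # assumed leader cluster address
    follower = Elasticsearch(["http://localhost:9201"])  # assumed follower cluster address

    # Mirror the MetaDataUpdateSettingsService entry above: replicas change on the leader only.
    leader.indices.put_settings(index="leader", body={"index": {"number_of_replicas": 1}})

    # The follower's copy of the setting should be unchanged, i.e. not "percolated" from the leader.
    follower_settings = follower.indices.get_settings(index="follower")["follower"]["settings"]
    print(follower_settings["index"]["number_of_replicas"])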
[2019-05-28T05:41:30,030][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [follower][0] shard follow task has been stopped [2019-05-28T05:41:30,030][WARN ][o.e.t.n.MockNioTransport ] [followerd3] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:30,040][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [leader/q5Af58ZOTVqYWcQyjvIalg] deleting index [2019-05-28T05:41:30,301][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [follower/N35Nb93bS3CQZIVY7iUmXw] deleting index [2019-05-28T05:41:30,378][INFO ][o.e.x.c.IndexFollowingIT ] [testLeaderIndexSettingNotPercolatedToFollower] after test [2019-05-28T05:41:30,771][INFO ][o.e.x.c.IndexFollowingIT ] [testDeleteFollowerIndex] before test [2019-05-28T05:41:30,776][INFO ][o.e.c.m.MetaDataCreateIndexService] [leader0] [index1] creating index, cause [api], templates [], shards [1]/[0], mappings [] [2019-05-28T05:41:30,989][INFO ][o.e.c.r.a.AllocationService] [leader0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index1][0]] ...]). [2019-05-28T05:41:31,393][INFO ][o.e.c.r.a.AllocationService] [followerm0] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[index2][0]] ...]). 
[2019-05-28T05:41:31,520][INFO ][o.e.x.c.a.ShardFollowTasksExecutor] [followerd3] [index2][0] Starting to track leader shard [index1][0] [2019-05-28T05:41:31,529][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] following leader shard [index1][0], follower global checkpoint=[-1], mapping version=[1], settings version=[1] [2019-05-28T05:41:31,530][INFO ][o.e.c.m.MetaDataMappingService] [leader0] [index1/634m7ZVDSsmcunNOurzlOw] create_mapping [doc] [2019-05-28T05:41:31,628][INFO ][o.e.c.m.MetaDataMappingService] [followerm0] [index2/hKtFkMOvTKqJ7a5bVYOxyA] create_mapping [doc] [2019-05-28T05:41:32,685][INFO ][o.e.c.m.MetaDataDeleteIndexService] [followerm0] [index2/hKtFkMOvTKqJ7a5bVYOxyA] deleting index [2019-05-28T05:41:32,810][WARN ][o.e.x.c.a.b.TransportBulkShardOperationsAction] [followerd3] unexpected error during the primary phase for action [indices:data/write/bulk_shard_operations[s]], request [BulkShardOperationsRequest{historyUUID=fQCvOKjGRSS2pcctUh-ogA, operations=1, maxSeqNoUpdates=-1, shardId=[index2][0], timeout=1m, index='index2', waitForActiveShards=DEFAULT}] org.elasticsearch.index.IndexNotFoundException: no such index [index2] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:194) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at 
org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.coordinateWrites(ShardFollowNodeTask.java:247) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.innerHandleReadResponse(ShardFollowNodeTask.java:359) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$handleReadResponse$6(ShardFollowNodeTask.java:310) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.maybeUpdateMapping(ShardFollowNodeTask.java:413) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$handleReadResponse$7(ShardFollowNodeTask.java:312) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.maybeUpdateSettings(ShardFollowNodeTask.java:428) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.handleReadResponse(ShardFollowNodeTask.java:314) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendShardChangesRequest$3(ShardFollowNodeTask.java:285) [main/:?] at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:62) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListenerResponseHandler.handleResponse(ActionListenerResponseHandler.java:54) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleResponse(TransportService.java:1101) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler$1.doRun(InboundHandler.java:224) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:192) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.handleResponse(InboundHandler.java:216) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:141) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:105) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:660) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.consumeNetworkReads(TcpTransport.java:684) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockTcpReadWriteHandler.consumeReads(MockNioTransport.java:255) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.handleReadBytes(SocketChannelContext.java:215) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.BytesChannelContext.read(BytesChannelContext.java:47) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleRead(EventHandler.java:119) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:127) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at 
org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:32,812][WARN ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] shard follow task encounter non-retryable error org.elasticsearch.index.IndexNotFoundException: no such index [index2] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:194) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteIndices(IndexNameExpressionResolver.java:120) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.concreteSingleIndex(IndexNameExpressionResolver.java:282) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.concreteIndex(TransportReplicationAction.java:235) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.access$1600(TransportReplicationAction.java:97) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:656) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:165) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.replication.TransportReplicationAction.doExecute(TransportReplicationAction.java:97) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:145) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:121) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:394) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor$1.innerSendBulkShardOperationsRequest(ShardFollowTasksExecutor.java:242) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.sendBulkShardOperationsRequest(ShardFollowNodeTask.java:377) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.coordinateWrites(ShardFollowNodeTask.java:247) [main/:?] 
at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.innerHandleReadResponse(ShardFollowNodeTask.java:359) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$handleReadResponse$6(ShardFollowNodeTask.java:310) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.maybeUpdateMapping(ShardFollowNodeTask.java:413) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$handleReadResponse$7(ShardFollowNodeTask.java:312) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.maybeUpdateSettings(ShardFollowNodeTask.java:428) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.handleReadResponse(ShardFollowNodeTask.java:314) [main/:?] at org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask.lambda$sendShardChangesRequest$3(ShardFollowNodeTask.java:285) [main/:?] at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:62) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListenerResponseHandler.handleResponse(ActionListenerResponseHandler.java:54) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleResponse(TransportService.java:1101) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler$1.doRun(InboundHandler.java:224) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:192) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.handleResponse(InboundHandler.java:216) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:141) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:105) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:660) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.consumeNetworkReads(TcpTransport.java:684) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockTcpReadWriteHandler.consumeReads(MockNioTransport.java:255) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.handleReadBytes(SocketChannelContext.java:215) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.BytesChannelContext.read(BytesChannelContext.java:47) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleRead(EventHandler.java:119) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:127) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at 
org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:32,815][INFO ][o.e.x.c.a.ShardFollowNodeTask] [followerd3] [index2][0] shard follow task has been stopped [2019-05-28T05:41:32,920][INFO ][o.e.c.m.MetaDataDeleteIndexService] [leader0] [index1/634m7ZVDSsmcunNOurzlOw] deleting index [2019-05-28T05:41:33,125][INFO ][o.e.x.c.IndexFollowingIT ] [testDeleteFollowerIndex] after test [2019-05-28T05:41:33,130][INFO ][o.e.n.Node ] [suite] stopping ... [2019-05-28T05:41:33,161][INFO ][o.e.c.s.ClusterApplierService] [leader1] master node changed {previous [{leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true}], current []}, term: 1, version: 130, reason: becoming candidate: onLeaderFailure [2019-05-28T05:41:33,161][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,165][INFO ][o.e.n.Node ] [suite] closing ... [2019-05-28T05:41:33,169][WARN ][o.e.t.RemoteClusterConnection] [followerm1] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] 
at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,169][WARN ][o.e.t.RemoteClusterConnection] [followerm2] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 
8 more [2019-05-28T05:41:33,170][WARN ][o.e.c.NodeConnectionsService] [leader1] failed to connect to {leader0}{9HWb90BFR1eAyo6kqv0ZYA}{Z9OIVCxxTJehM-UX-y2vKA}{127.0.0.1}{127.0.0.1:43808}{xpack.installed=true} (tried [1] times) org.elasticsearch.transport.ConnectTransportException: [leader0][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:883) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2322) ~[?:?] at org.elasticsearch.common.concurrent.CompletableContext.addListener(CompletableContext.java:45) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.addConnectListener(SocketChannelContext.java:83) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSocketChannel.addConnectListener(NioSocketChannel.java:77) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockSocketChannel.addConnectListener(MockNioTransport.java:318) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.initiateConnection(TcpTransport.java:299) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.openConnection(TcpTransport.java:266) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.test.transport.StubbableTransport.openConnection(StubbableTransport.java:140) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.ConnectionManager.internalOpenConnection(ConnectionManager.java:206) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.ConnectionManager.connectToNode(ConnectionManager.java:104) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.test.transport.StubbableConnectionManager.connectToNode(StubbableConnectionManager.java:115) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:346) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:333) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.NodeConnectionsService$ConnectionTarget$1.doRun(NodeConnectionsService.java:304) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:758) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] 
Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 1 more [2019-05-28T05:41:33,169][WARN ][o.e.t.RemoteClusterConnection] [followerd3] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,169][WARN ][o.e.t.RemoteClusterConnection] [followerd4] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,169][WARN ][o.e.t.RemoteClusterConnection] [followerm0] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,174][INFO ][o.e.n.Node ] [suite] closed [2019-05-28T05:41:33,180][INFO ][o.e.n.Node ] [suite] stopping ... [2019-05-28T05:41:33,194][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,194][INFO ][o.e.n.Node ] [suite] closing ... [2019-05-28T05:41:33,198][WARN ][o.e.t.RemoteClusterConnection] [followerd3] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,201][WARN ][o.e.t.RemoteClusterConnection] [followerm1] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,198][WARN ][o.e.t.RemoteClusterConnection] [followerm0] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,200][WARN ][o.e.t.RemoteClusterConnection] [followerd4] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,198][WARN ][o.e.t.RemoteClusterConnection] [followerm2] fetching nodes from external cluster [leader_cluster] failed org.elasticsearch.transport.ConnectTransportException: [][127.0.0.1:43808] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2159) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:120) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 8 more [2019-05-28T05:41:33,208][INFO ][o.e.n.Node ] [suite] closed [2019-05-28T05:41:33,213][INFO ][o.e.n.Node ] [suite] stopping ... [2019-05-28T05:41:33,221][INFO ][o.e.c.s.MasterService ] [followerm0] node-left[{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true} disconnected], term: 1, version: 336, reason: removed {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},} [2019-05-28T05:41:33,221][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,221][INFO ][o.e.n.Node ] [suite] closing ... [2019-05-28T05:41:33,233][INFO ][o.e.c.s.ClusterApplierService] [followerd4] removed {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 336, reason: ApplyCommitRequest{term=1, version=336, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:41:33,233][INFO ][o.e.c.s.ClusterApplierService] [followerm1] removed {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 336, reason: ApplyCommitRequest{term=1, version=336, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:41:33,233][INFO ][o.e.c.s.ClusterApplierService] [followerm2] removed {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 336, reason: ApplyCommitRequest{term=1, version=336, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:41:33,236][INFO ][o.e.n.Node ] [suite] closed [2019-05-28T05:41:33,239][INFO ][o.e.n.Node ] [suite] stopping ... 
[2019-05-28T05:41:33,246][INFO ][o.e.c.s.ClusterApplierService] [followerm0] removed {{followerd3}{imikiXyHQzWPRqBx1bSawA}{C6gT8G-yRjyyJ4rx_dUrGA}{127.0.0.1}{127.0.0.1:37483}{xpack.installed=true},}, term: 1, version: 336, reason: Publication{term=1, version=336} [2019-05-28T05:41:33,248][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,248][INFO ][o.e.n.Node ] [suite] closing ... [2019-05-28T05:41:33,251][INFO ][o.e.c.s.MasterService ] [followerm0] node-left[{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true} disconnected], term: 1, version: 337, reason: removed {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},} [2019-05-28T05:41:33,259][INFO ][o.e.n.Node ] [suite] closed [2019-05-28T05:41:33,263][INFO ][o.e.c.s.ClusterApplierService] [followerm1] removed {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 337, reason: ApplyCommitRequest{term=1, version=337, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:41:33,264][INFO ][o.e.c.s.ClusterApplierService] [followerm2] removed {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 337, reason: ApplyCommitRequest{term=1, version=337, sourceNode={followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}} [2019-05-28T05:41:33,266][INFO ][o.e.n.Node ] [suite] stopping ... [2019-05-28T05:41:33,268][INFO ][o.e.c.s.ClusterApplierService] [followerm0] removed {{followerd4}{XK3Qc8oGSAqdSsvD2_yMPw}{Gk8vRK_-TJuZ67jtxVU98w}{127.0.0.1}{127.0.0.1:34928}{xpack.installed=true},}, term: 1, version: 337, reason: Publication{term=1, version=337} [2019-05-28T05:41:33,274][INFO ][o.e.c.s.ClusterApplierService] [followerm1] master node changed {previous [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}], current []}, term: 1, version: 337, reason: becoming candidate: onLeaderFailure [2019-05-28T05:41:33,274][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,274][INFO ][o.e.n.Node ] [suite] closing ... 
[2019-05-28T05:41:33,274][INFO ][o.e.c.s.ClusterApplierService] [followerm2] master node changed {previous [{followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true}], current []}, term: 1, version: 337, reason: becoming candidate: onLeaderFailure [2019-05-28T05:41:33,287][WARN ][o.e.t.n.MockNioTransport ] [followerm2] Slow execution on network thread [200 milliseconds] java.lang.RuntimeException: Slow exception on network thread at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.maybeLogElapsedTime(MockNioTransport.java:367) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$TransportThreadWatchdog.unregister(MockNioTransport.java:359) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleRead(TestEventHandler.java:130) [framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.handleRead(NioSelector.java:397) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.processKey(NioSelector.java:246) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:172) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) [elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.lang.Thread.run(Thread.java:835) [?:?] [2019-05-28T05:41:33,283][WARN ][o.e.c.NodeConnectionsService] [followerm1] failed to connect to {followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true} (tried [1] times) org.elasticsearch.transport.ConnectTransportException: [followerm0][127.0.0.1:45522] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:883) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2322) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.addListener(CompletableContext.java:45) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.addConnectListener(SocketChannelContext.java:83) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSocketChannel.addConnectListener(NioSocketChannel.java:77) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockSocketChannel.addConnectListener(MockNioTransport.java:318) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.initiateConnection(TcpTransport.java:299) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.openConnection(TcpTransport.java:266) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.test.transport.StubbableTransport.openConnection(StubbableTransport.java:140) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.ConnectionManager.internalOpenConnection(ConnectionManager.java:206) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.ConnectionManager.connectToNode(ConnectionManager.java:104) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.test.transport.StubbableConnectionManager.connectToNode(StubbableConnectionManager.java:115) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:346) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:333) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.NodeConnectionsService$ConnectionTarget$1.doRun(NodeConnectionsService.java:304) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:758) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] 
at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 1 more [2019-05-28T05:41:33,287][WARN ][o.e.c.NodeConnectionsService] [followerm2] failed to connect to {followerm0}{UYz5fl8mR7eWQMfDxioQTg}{cveYeD_-Q5Sl4iFbTxGJAA}{127.0.0.1}{127.0.0.1:45522}{xpack.installed=true} (tried [1] times) org.elasticsearch.transport.ConnectTransportException: [followerm0][127.0.0.1:45522] connect_exception at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:972) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$3(ActionListener.java:161) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:883) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2322) ~[?:?] 
at org.elasticsearch.common.concurrent.CompletableContext.addListener(CompletableContext.java:45) ~[elasticsearch-core-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.SocketChannelContext.addConnectListener(SocketChannelContext.java:83) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSocketChannel.addConnectListener(NioSocketChannel.java:77) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.MockNioTransport$MockSocketChannel.addConnectListener(MockNioTransport.java:318) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.initiateConnection(TcpTransport.java:299) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TcpTransport.openConnection(TcpTransport.java:266) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.test.transport.StubbableTransport.openConnection(StubbableTransport.java:140) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.ConnectionManager.internalOpenConnection(ConnectionManager.java:206) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.ConnectionManager.connectToNode(ConnectionManager.java:104) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.test.transport.StubbableConnectionManager.connectToNode(StubbableConnectionManager.java:115) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:346) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:333) ~[elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.cluster.NodeConnectionsService$ConnectionTarget$1.doRun(NodeConnectionsService.java:304) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:758) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?] at java.lang.Thread.run(Thread.java:835) [?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?] 
at org.elasticsearch.nio.SocketChannelContext.connect(SocketChannelContext.java:117) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.EventHandler.handleConnect(EventHandler.java:97) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.transport.nio.TestEventHandler.handleConnect(TestEventHandler.java:99) ~[framework-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.attemptConnect(NioSelector.java:405) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.setUpNewChannels(NioSelector.java:422) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.preSelect(NioSelector.java:261) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.singleLoop(NioSelector.java:153) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] at org.elasticsearch.nio.NioSelector.runLoop(NioSelector.java:129) ~[elasticsearch-nio-7.3.0-SNAPSHOT.jar:7.3.0-SNAPSHOT] ... 1 more [2019-05-28T05:41:33,296][INFO ][o.e.n.Node ] [suite] closed [2019-05-28T05:41:33,299][INFO ][o.e.n.Node ] [suite] stopping ... [2019-05-28T05:41:33,304][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,304][INFO ][o.e.n.Node ] [suite] closing ... [2019-05-28T05:41:33,308][INFO ][o.e.n.Node ] [suite] closed [2019-05-28T05:41:33,310][INFO ][o.e.n.Node ] [suite] stopping ... [2019-05-28T05:41:33,315][INFO ][o.e.n.Node ] [suite] stopped [2019-05-28T05:41:33,315][INFO ][o.e.n.Node ] [suite] closing ... [2019-05-28T05:41:33,318][INFO ][o.e.n.Node ] [suite] closed
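
The failure captured above is the testDeleteFollowerIndex scenario: the follower index [index2] is deleted while its shard-follow task is still replicating from the leader index [index1], so the task's next indices:data/write/bulk_shard_operations[s] request can no longer resolve the index, the resulting IndexNotFoundException is treated as a non-retryable error, and the shard-follow task stops. The ConnectTransportException / "Connection refused" warnings after that point coincide with the per-test leader and follower clusters being stopped and closed. For readers who want to walk through the same sequence outside the internal test framework, below is a minimal sketch against the public CCR REST API using the JDK 11 HttpClient. The index names and the leader_cluster alias come from the log; the localhost:9200 (leader) and localhost:9201 (follower) addresses, and the assumption that the follower cluster's cluster.remote.leader_cluster.seeds setting already points at the leader, are illustrative and not taken from this log.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

/**
 * Minimal sketch of the testDeleteFollowerIndex scenario driven through the public
 * CCR REST API rather than the internal test cluster. Assumes a leader cluster on
 * localhost:9200 and a follower cluster on localhost:9201 whose
 * cluster.remote.leader_cluster.seeds setting already points at the leader's
 * transport port; these addresses and the setup are illustrative assumptions.
 */
public class DeleteFollowerIndexSketch {

    private static final HttpClient CLIENT = HttpClient.newHttpClient();

    public static void main(String[] args) throws Exception {
        // 1. Create the leader index on the leader cluster.
        send("PUT", "http://localhost:9200/index1",
                "{\"settings\":{\"number_of_shards\":1,\"number_of_replicas\":0}}");

        // 2. Start following it from the follower cluster; this creates index2 there
        //    and starts the shard-follow task seen in the log.
        send("PUT", "http://localhost:9201/index2/_ccr/follow",
                "{\"remote_cluster\":\"leader_cluster\",\"leader_index\":\"index1\"}");

        // 3. Index a document on the leader so the shard-follow task has operations
        //    to ship (this is what produces the create_mapping [doc] entries).
        send("POST", "http://localhost:9200/index1/_doc", "{\"f\":1}");

        // 4. Delete the follower index out from under the running shard-follow task.
        //    In the test this happens while operations are still being shipped, so the
        //    task's next bulk write fails with IndexNotFoundException [index2], which
        //    it treats as a non-retryable error and stops.
        send("DELETE", "http://localhost:9201/index2", null);
    }

    private static void send(String method, String url, String jsonBody) throws Exception {
        HttpRequest.Builder builder = HttpRequest.newBuilder(URI.create(url))
                .header("Content-Type", "application/json");
        HttpRequest request = jsonBody == null
                ? builder.method(method, HttpRequest.BodyPublishers.noBody()).build()
                : builder.method(method, HttpRequest.BodyPublishers.ofString(jsonBody)).build();
        HttpResponse<String> response = CLIENT.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(method + " " + url + " -> " + response.statusCode());
    }
}

In normal operation a follower index is retired by pausing the follow, closing the index, and unfollowing; deleting it directly, as this test deliberately does, is exactly the step that turns the next replicated write into the non-retryable error logged above.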