Skip to content

Commit c7571ec

Browse files
committed
Merge remote-tracking branch 'origin/master' into resize-settings
* origin/master: [test] add java packaging test project (elastic#30161) Fix macros in changelog (elastic#30269) [DOCS] Fixes syskeygen command name [ML] Include 3rd party C++ component notices (elastic#30132) _cluster/state Skip Test for pre-6.4, not pre-7.0 (elastic#30264) Improve docs for disk watermarks (elastic#30249) [DOCS] Removes redundant Active Directory realm settings (elastic#30190) [DOCS] Removes redundant LDAP realm settings (elastic#30193) _cluster/state should always return cluster_uuid (elastic#30143) HTML5ify Javadoc for core and test framework (elastic#30234) Minor tweaks to reroute documentation (elastic#30246)
2 parents 796f011 + 65e5868 commit c7571ec

File tree

28 files changed

+511
-614
lines changed

28 files changed

+511
-614
lines changed

Vagrantfile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -337,6 +337,7 @@ export BATS=/project/build/bats
337337
export BATS_UTILS=/project/build/packaging/bats/utils
338338
export BATS_TESTS=/project/build/packaging/bats/tests
339339
export PACKAGING_ARCHIVES=/project/build/packaging/archives
340+
export PACKAGING_TESTS=/project/build/packaging/tests
340341
VARS
341342
cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
342343
Defaults env_keep += "ZIP"
@@ -347,6 +348,7 @@ Defaults env_keep += "BATS"
347348
Defaults env_keep += "BATS_UTILS"
348349
Defaults env_keep += "BATS_TESTS"
349350
Defaults env_keep += "PACKAGING_ARCHIVES"
351+
Defaults env_keep += "PACKAGING_TESTS"
350352
SUDOERS_VARS
351353
chmod 0440 /etc/sudoers.d/elasticsearch_vars
352354
SHELL

buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -561,8 +561,6 @@ class BuildPlugin implements Plugin<Project> {
561561
*/
562562
List html4Projects = [
563563
':server',
564-
':libs:elasticsearch-core',
565-
':test:framework',
566564
':x-pack:plugin:core',
567565
]
568566
if (false == html4Projects.contains(project.path)) {

buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,9 @@ class VagrantPropertiesExtension {
4141
@Input
4242
Boolean inheritTestUtils
4343

44+
@Input
45+
String testClass
46+
4447
VagrantPropertiesExtension(List<String> availableBoxes) {
4548
this.boxes = availableBoxes
4649
this.batsDir = 'src/test/resources/packaging'

buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy

Lines changed: 41 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ class VagrantTestPlugin implements Plugin<Project> {
5151
static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
5252

5353
private static final PACKAGING_CONFIGURATION = 'packaging'
54+
private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest'
5455
private static final BATS = 'bats'
5556
private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
5657
private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest"
@@ -66,6 +67,7 @@ class VagrantTestPlugin implements Plugin<Project> {
6667

6768
// Creates custom configurations for Bats testing files (and associated scripts and archives)
6869
createPackagingConfiguration(project)
70+
project.configurations.create(PACKAGING_TEST_CONFIGURATION)
6971

7072
// Creates all the main Vagrant tasks
7173
createVagrantTasks(project)
@@ -144,10 +146,12 @@ class VagrantTestPlugin implements Plugin<Project> {
144146
}
145147

146148
private static void createCleanTask(Project project) {
147-
project.tasks.create('clean', Delete.class) {
148-
description 'Clean the project build directory'
149-
group 'Build'
150-
delete project.buildDir
149+
if (project.tasks.findByName('clean') == null) {
150+
project.tasks.create('clean', Delete.class) {
151+
description 'Clean the project build directory'
152+
group 'Build'
153+
delete project.buildDir
154+
}
151155
}
152156
}
153157

@@ -174,6 +178,18 @@ class VagrantTestPlugin implements Plugin<Project> {
174178
from project.configurations[PACKAGING_CONFIGURATION]
175179
}
176180

181+
File testsDir = new File(packagingDir, 'tests')
182+
Copy copyPackagingTests = project.tasks.create('copyPackagingTests', Copy) {
183+
into testsDir
184+
from project.configurations[PACKAGING_TEST_CONFIGURATION]
185+
}
186+
187+
Task createTestRunnerScript = project.tasks.create('createTestRunnerScript', FileContentsTask) {
188+
dependsOn copyPackagingTests
189+
file "${testsDir}/run-tests.sh"
190+
contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}"
191+
}
192+
177193
Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
178194
dependsOn copyPackagingArchives
179195
file "${archivesDir}/version"
@@ -234,7 +250,8 @@ class VagrantTestPlugin implements Plugin<Project> {
234250

235251
Task vagrantSetUpTask = project.tasks.create('setupPackagingTest')
236252
vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
237-
vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
253+
vagrantSetUpTask.dependsOn copyPackagingArchives, copyPackagingTests, createTestRunnerScript
254+
vagrantSetUpTask.dependsOn createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
238255
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils
239256
}
240257

@@ -393,20 +410,29 @@ class VagrantTestPlugin implements Plugin<Project> {
393410
packagingTest.dependsOn(batsPackagingTest)
394411
}
395412

396-
// This task doesn't do anything yet. In the future it will execute a jar containing tests on the vm
397-
Task groovyPackagingTest = project.tasks.create("vagrant${boxTask}#groovyPackagingTest")
398-
groovyPackagingTest.dependsOn(up)
399-
groovyPackagingTest.finalizedBy(halt)
413+
Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) {
414+
command 'ssh'
415+
boxName box
416+
environmentVars vagrantEnvVars
417+
dependsOn up, setupPackagingTest
418+
finalizedBy halt
419+
args '--command', "bash \"\$PACKAGING_TESTS/run-tests.sh\""
420+
}
421+
422+
// todo remove this onlyIf after all packaging tests are consolidated
423+
javaPackagingTest.onlyIf {
424+
project.extensions.esvagrant.testClass != null
425+
}
400426

401-
TaskExecutionAdapter groovyPackagingReproListener = createReproListener(project, groovyPackagingTest.path)
402-
groovyPackagingTest.doFirst {
403-
project.gradle.addListener(groovyPackagingReproListener)
427+
TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path)
428+
javaPackagingTest.doFirst {
429+
project.gradle.addListener(javaPackagingReproListener)
404430
}
405-
groovyPackagingTest.doLast {
406-
project.gradle.removeListener(groovyPackagingReproListener)
431+
javaPackagingTest.doLast {
432+
project.gradle.removeListener(javaPackagingReproListener)
407433
}
408434
if (project.extensions.esvagrant.boxes.contains(box)) {
409-
packagingTest.dependsOn(groovyPackagingTest)
435+
packagingTest.dependsOn(javaPackagingTest)
410436
}
411437

412438
Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {

distribution/archives/build.gradle

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,6 +217,24 @@ subprojects {
217217
}
218218
check.dependsOn checkNotice
219219

220+
if (project.name == 'zip' || project.name == 'tar') {
221+
task checkMlCppNotice {
222+
dependsOn buildDist, checkExtraction
223+
onlyIf toolExists
224+
doLast {
225+
// this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines
226+
final List<String> expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003")
227+
final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt")
228+
final List<String> actualLines = Files.readAllLines(noticePath)
229+
for (final String expectedLine : expectedLines) {
230+
if (actualLines.contains(expectedLine) == false) {
231+
throw new GradleException("expected [${noticePath}] to contain [${expectedLine}] but it did not")
232+
}
233+
}
234+
}
235+
}
236+
check.dependsOn checkMlCppNotice
237+
}
220238
}
221239

222240
/*****************************************************************************

docs/CHANGELOG.asciidoc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
// Use these for links to issue and pulls. Note issues and pulls redirect one to
22
// each other on Github, so don't worry too much on using the right prefix.
3-
// :issue: https://github.com/elastic/elasticsearch/issues/
4-
// :pull: https://github.com/elastic/elasticsearch/pull/
3+
:issue: https://github.com/elastic/elasticsearch/issues/
4+
:pull: https://github.com/elastic/elasticsearch/pull/
55

66
= Elasticsearch Release Notes
77

Lines changed: 70 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,12 @@
11
[[cluster-reroute]]
22
== Cluster Reroute
33

4-
The reroute command allows to explicitly execute a cluster reroute
5-
allocation command including specific commands. For example, a shard can
6-
be moved from one node to another explicitly, an allocation can be
7-
canceled, or an unassigned shard can be explicitly allocated on a
8-
specific node.
4+
The reroute command allows for manual changes to the allocation of individual
5+
shards in the cluster. For example, a shard can be moved from one node to
6+
another explicitly, an allocation can be cancelled, and an unassigned shard can
7+
be explicitly allocated to a specific node.
98

10-
Here is a short example of how a simple reroute API call:
9+
Here is a short example of a simple reroute API call:
1110

1211
[source,js]
1312
--------------------------------------------------
@@ -32,59 +31,53 @@ POST /_cluster/reroute
3231
// CONSOLE
3332
// TEST[skip:doc tests run with only a single node]
3433

35-
An important aspect to remember is the fact that once when an allocation
36-
occurs, the cluster will aim at re-balancing its state back to an even
37-
state. For example, if the allocation includes moving a shard from
38-
`node1` to `node2`, in an `even` state, then another shard will be moved
39-
from `node2` to `node1` to even things out.
34+
It is important to note that after processing any reroute commands
35+
Elasticsearch will perform rebalancing as normal (respecting the values of
36+
settings such as `cluster.routing.rebalance.enable`) in order to remain in a
37+
balanced state. For example, if the requested allocation includes moving a
38+
shard from `node1` to `node2` then this may cause a shard to be moved from
39+
`node2` back to `node1` to even things out.
4040

41-
The cluster can be set to disable allocations, which means that only the
42-
explicitly allocations will be performed. Obviously, only once all
43-
commands has been applied, the cluster will aim to be re-balance its
44-
state.
41+
The cluster can be set to disable allocations using the
42+
`cluster.routing.allocation.enable` setting. If allocations are disabled then
43+
the only allocations that will be performed are explicit ones given using the
44+
`reroute` command, and consequent allocations due to rebalancing.
4545

46-
Another option is to run the commands in `dry_run` (as a URI flag, or in
47-
the request body). This will cause the commands to apply to the current
48-
cluster state, and return the resulting cluster after the commands (and
49-
re-balancing) has been applied.
46+
It is possible to run `reroute` commands in "dry run" mode by using the
47+
`?dry_run` URI query parameter, or by passing `"dry_run": true` in the request
48+
body. This will calculate the result of applying the commands to the current
49+
cluster state, and return the resulting cluster state after the commands (and
48+
re-balancing) have been applied, but will not actually perform the requested
51+
changes.
5052

51-
If the `explain` parameter is specified, a detailed explanation of why the
52-
commands could or could not be executed is returned.
53+
If the `?explain` URI query parameter is included then a detailed explanation
54+
of why the commands could or could not be executed is included in the response.
5355

5456
The commands supported are:
5557

5658
`move`::
5759
Move a started shard from one node to another node. Accepts
5860
`index` and `shard` for index name and shard number, `from_node` for the
59-
node to move the shard `from`, and `to_node` for the node to move the
61+
node to move the shard from, and `to_node` for the node to move the
6062
shard to.
6163

6264
`cancel`::
63-
Cancel allocation of a shard (or recovery). Accepts `index`
64-
and `shard` for index name and shard number, and `node` for the node to
65-
cancel the shard allocation on. It also accepts `allow_primary` flag to
66-
explicitly specify that it is allowed to cancel allocation for a primary
67-
shard. This can be used to force resynchronization of existing replicas
68-
from the primary shard by cancelling them and allowing them to be
69-
reinitialized through the standard reallocation process.
65+
Cancel allocation of a shard (or recovery). Accepts `index` and `shard` for
66+
index name and shard number, and `node` for the node to cancel the shard
67+
allocation on. This can be used to force resynchronization of existing
68+
replicas from the primary shard by cancelling them and allowing them to be
69+
reinitialized through the standard recovery process. By default only
70+
replica shard allocations can be cancelled. If it is necessary to cancel
71+
the allocation of a primary shard then the `allow_primary` flag must also
72+
be included in the request.
7073

7174
`allocate_replica`::
72-
Allocate an unassigned replica shard to a node. Accepts the
73-
`index` and `shard` for index name and shard number, and `node` to
74-
allocate the shard to. Takes <<modules-cluster,allocation deciders>> into account.
75-
76-
Two more commands are available that allow the allocation of a primary shard
77-
to a node. These commands should however be used with extreme care, as primary
78-
shard allocation is usually fully automatically handled by Elasticsearch.
79-
Reasons why a primary shard cannot be automatically allocated include the following:
80-
81-
- A new index was created but there is no node which satisfies the allocation deciders.
82-
- An up-to-date shard copy of the data cannot be found on the current data nodes in
83-
the cluster. To prevent data loss, the system does not automatically promote a stale
84-
shard copy to primary.
75+
Allocate an unassigned replica shard to a node. Accepts `index` and `shard`
76+
for index name and shard number, and `node` to allocate the shard to. Takes
77+
<<modules-cluster,allocation deciders>> into account.
8578

8679
[float]
87-
=== Retry failed shards
80+
=== Retrying failed allocations
8881

8982
The cluster will attempt to allocate a shard a maximum of
9083
`index.allocation.max_retries` times in a row (defaults to `5`), before giving
@@ -93,36 +86,48 @@ structural problems such as having an analyzer which refers to a stopwords
9386
file which doesn't exist on all nodes.
9487

9588
Once the problem has been corrected, allocation can be manually retried by
96-
calling the <<cluster-reroute,`reroute`>> API with `?retry_failed`, which
97-
will attempt a single retry round for these shards.
89+
calling the <<cluster-reroute,`reroute`>> API with the `?retry_failed` URI
90+
query parameter, which will attempt a single retry round for these shards.
9891

9992
[float]
10093
=== Forced allocation on unrecoverable errors
10194

95+
Two more commands are available that allow the allocation of a primary shard to
96+
a node. These commands should however be used with extreme care, as primary
97+
shard allocation is usually fully automatically handled by Elasticsearch.
98+
Reasons why a primary shard cannot be automatically allocated include the
99+
following:
100+
101+
- A new index was created but there is no node which satisfies the allocation
102+
deciders.
103+
- An up-to-date shard copy of the data cannot be found on the current data
104+
nodes in the cluster. To prevent data loss, the system does not automatically
105+
promote a stale shard copy to primary.
106+
102107
The following two commands are dangerous and may result in data loss. They are
103-
meant to be used in cases where the original data can not be recovered and the cluster
104-
administrator accepts the loss. If you have suffered a temporary issue that has been
105-
fixed, please see the `retry_failed` flag described above.
108+
meant to be used in cases where the original data can not be recovered and the
109+
cluster administrator accepts the loss. If you have suffered a temporary issue
110+
that can be fixed, please see the `retry_failed` flag described above. To
111+
emphasise: if these commands are performed and then a node joins the cluster
112+
that holds a copy of the affected shard then the copy on the newly-joined node
113+
will be deleted or overwritten.
106114

107115
`allocate_stale_primary`::
108116
Allocate a primary shard to a node that holds a stale copy. Accepts the
109-
`index` and `shard` for index name and shard number, and `node` to
110-
allocate the shard to. Using this command may lead to data loss
111-
for the provided shard id. If a node which has the good copy of the
112-
data rejoins the cluster later on, that data will be overwritten with
113-
the data of the stale copy that was forcefully allocated with this
114-
command. To ensure that these implications are well-understood,
115-
this command requires the special field `accept_data_loss` to be
116-
explicitly set to `true` for it to work.
117+
`index` and `shard` for index name and shard number, and `node` to allocate
118+
the shard to. Using this command may lead to data loss for the provided
119+
shard id. If a node which has the good copy of the data rejoins the cluster
120+
later on, that data will be deleted or overwritten with the data of the
121+
stale copy that was forcefully allocated with this command. To ensure that
122+
these implications are well-understood, this command requires the flag
123+
`accept_data_loss` to be explicitly set to `true`.
117124

118125
`allocate_empty_primary`::
119-
Allocate an empty primary shard to a node. Accepts the
120-
`index` and `shard` for index name and shard number, and `node` to
121-
allocate the shard to. Using this command leads to a complete loss
122-
of all data that was indexed into this shard, if it was previously
123-
started. If a node which has a copy of the
124-
data rejoins the cluster later on, that data will be deleted!
125-
To ensure that these implications are well-understood,
126-
this command requires the special field `accept_data_loss` to be
127-
explicitly set to `true` for it to work.
126+
Allocate an empty primary shard to a node. Accepts the `index` and `shard`
127+
for index name and shard number, and `node` to allocate the shard to. Using
128+
this command leads to a complete loss of all data that was indexed into
129+
this shard, if it was previously started. If a node which has a copy of the
130+
data rejoins the cluster later on, that data will be deleted. To ensure
131+
that these implications are well-understood, this command requires the flag
132+
`accept_data_loss` to be explicitly set to `true`.
128133

docs/reference/cluster/state.asciidoc

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,12 @@ of the cluster state (its size when serialized for transmission over
1515
the network), and the cluster state itself, which can be filtered to
1616
only retrieve the parts of interest, as described below.
1717

18+
The cluster's `cluster_uuid` is also returned as part of the top-level
19+
response, in addition to the `metadata` section. added[6.4.0]
20+
21+
NOTE: While the cluster is still forming, it is possible for the `cluster_uuid`
22+
to be `_na_` as well as the cluster state's version to be `-1`.
23+
1824
By default, the cluster state request is routed to the master node, to
1925
ensure that the latest cluster state is returned.
2026
For debugging purposes, you can retrieve the cluster state local to a

0 commit comments

Comments
 (0)