Skip to content

Commit

Permalink
Merge remote-tracking branch 'elastic/master' into retention-lease-st…
Browse files Browse the repository at this point in the history
…ate-file

* elastic/master:
  Remove tests and branches that will never execute (elastic#38772)
  also check ccr stats api return empty response in ensureNoCcrTasks()
  Add overlapping, before, after filters to intervals query (elastic#38999)
  Mute test elastic#38949
  Add remote recovery to ShardFollowTaskReplicationTests (elastic#39007)
  [ML] More advanced post-test cleanup of ML indices (elastic#39049)
  wait for shard to be allocated before executing a resume follow api
  Update track-total-hits.asciidoc
  Force kill testcluster nodes (elastic#37353)
  Make pullFixture a task dependency of resolveAllDependencies (elastic#38956)
  set minimum supported version (elastic#39043)
  Enforce Completion Context Limit (elastic#38675)
  Mute test
  Don't close caches while there might still be in-flight requests. (elastic#38958)
  Fix elastic#38623 remove xpack namespace REST API (elastic#38625)
  Add data frame feature (elastic#38934)
  Test bi-directional index following during a rolling upgrade. (elastic#38962)
  Generate mvn pom for ssl-config library (elastic#39019)
  Mute testRetentionLeaseIsRenewedDuringRecovery
  • Loading branch information
jasontedor committed Feb 18, 2019
2 parents 162cf77 + 6891475 commit 8433480
Show file tree
Hide file tree
Showing 242 changed files with 9,929 additions and 1,414 deletions.
2 changes: 1 addition & 1 deletion .ci/packer_cache.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,4 @@ export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA}
export JAVA8_HOME="${HOME}"/.java/java8
export JAVA11_HOME="${HOME}"/.java/java11
export JAVA12_HOME="${HOME}"/.java/java12
./gradlew --parallel clean pullFixture --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies
./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies
7 changes: 4 additions & 3 deletions build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -616,9 +616,10 @@ if (System.properties.get("build.compare") != null) {

allprojects {
task resolveAllDependencies {
doLast {
configurations.findAll { it.isCanBeResolved() }.each { it.resolve() }
}
dependsOn tasks.matching { it.name == "pullFixture"}
doLast {
configurations.findAll { it.isCanBeResolved() }.each { it.resolve() }
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -243,35 +243,46 @@ synchronized void stop(boolean tailLogs) {
}
logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs);
requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped.");
stopHandle(esProcess.toHandle());
// Test clusters are not reused, don't spend time on a graceful shutdown
stopHandle(esProcess.toHandle(), true);
if (tailLogs) {
logFileContents("Standard output of node", esStdoutFile);
logFileContents("Standard error of node", esStderrFile);
}
esProcess = null;
}

private void stopHandle(ProcessHandle processHandle) {
private void stopHandle(ProcessHandle processHandle, boolean forcibly) {
// Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
if (processHandle.isAlive()) {
processHandle.children().forEach(this::stopHandle);
}
logProcessInfo("Terminating elasticsearch process:", processHandle.info());
if (processHandle.isAlive()) {
processHandle.destroy();
} else {
if (processHandle.isAlive() == false) {
logger.info("Process was not running when we tried to terminate it.");
return;
}
waitForProcessToExit(processHandle);
if (processHandle.isAlive()) {

// Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
processHandle.children().forEach(each -> stopHandle(each, forcibly));

logProcessInfo(
"Terminating elasticsearch process" + (forcibly ? " forcibly " : "gracefully") + ":",
processHandle.info()
);

if (forcibly) {
processHandle.destroyForcibly();
} else {
processHandle.destroy();
waitForProcessToExit(processHandle);
if (processHandle.isAlive() == false) {
return;
}
logger.info("process did not terminate after {} {}, stopping it forcefully",
ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT
);
ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT);
processHandle.destroyForcibly();
}

waitForProcessToExit(processHandle);
if (processHandle.isAlive()) {
throw new TestClustersException("Was not able to terminate es process");
throw new TestClustersException("Was not able to terminate elasticsearch process");
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1697,7 +1697,7 @@ public void testCRUDIndexTemplateWithTypes() throws Exception {
assertTrue(template2.mappings().containsKey("custom_doc_type"));

List<String> names = randomBoolean()
? Arrays.asList("*-1", "template-2")
? Arrays.asList("*plate-1", "template-2")
: Arrays.asList("template-*");
GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest(names);
org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getBoth = execute(
Expand Down Expand Up @@ -1780,7 +1780,7 @@ public void testCRUDIndexTemplate() throws Exception {


List<String> names = randomBoolean()
? Arrays.asList("*-1", "template-2")
? Arrays.asList("*plate-1", "template-2")
: Arrays.asList("template-*");
GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest(names);
GetIndexTemplatesResponse getBoth = execute(
Expand Down Expand Up @@ -1834,7 +1834,7 @@ public void testIndexTemplatesExist() throws Exception {

{
final List<String> templateNames = randomBoolean()
? Arrays.asList("*-1", "template-2")
? Arrays.asList("*plate-1", "template-2")
: Arrays.asList("template-*");

final IndexTemplatesExistRequest bothRequest = new IndexTemplatesExistRequest(templateNames);
Expand Down
24 changes: 12 additions & 12 deletions docs/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -840,7 +840,7 @@ buildRestTests.setups['sensor_prefab_data'] = '''
'''
buildRestTests.setups['sample_job'] = '''
- do:
xpack.ml.put_job:
ml.put_job:
job_id: "sample_job"
body: >
{
Expand Down Expand Up @@ -894,7 +894,7 @@ buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index
'''
buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + '''
- do:
xpack.ml.put_job:
ml.put_job:
job_id: "farequote"
body: >
{
Expand All @@ -914,7 +914,7 @@ buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data']
'''
buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + '''
- do:
xpack.ml.put_datafeed:
ml.put_datafeed:
datafeed_id: "datafeed-farequote"
body: >
{
Expand Down Expand Up @@ -978,7 +978,7 @@ buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_met
'''
buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + '''
- do:
xpack.ml.put_job:
ml.put_job:
job_id: "total-requests"
body: >
{
Expand All @@ -1000,7 +1000,7 @@ buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metr
'''
buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + '''
- do:
xpack.ml.put_datafeed:
ml.put_datafeed:
datafeed_id: "datafeed-total-requests"
body: >
{
Expand All @@ -1010,22 +1010,22 @@ buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server
'''
buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + '''
- do:
xpack.ml.open_job:
ml.open_job:
job_id: "total-requests"
'''
buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + '''
- do:
xpack.ml.start_datafeed:
ml.start_datafeed:
datafeed_id: "datafeed-total-requests"
'''
buildRestTests.setups['calendar_outages'] = '''
- do:
xpack.ml.put_calendar:
ml.put_calendar:
calendar_id: "planned-outages"
'''
buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + '''
- do:
xpack.ml.post_calendar_events:
ml.post_calendar_events:
calendar_id: "planned-outages"
body: >
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" }
Expand All @@ -1034,12 +1034,12 @@ buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['cale
'''
buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + '''
- do:
xpack.ml.put_calendar:
ml.put_calendar:
calendar_id: "planned-outages"
'''
buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + '''
- do:
xpack.ml.put_calendar:
ml.put_calendar:
calendar_id: "planned-outages"
body: >
{
Expand All @@ -1048,7 +1048,7 @@ buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server
'''
buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + '''
- do:
xpack.ml.post_calendar_events:
ml.post_calendar_events:
calendar_id: "planned-outages"
body: >
{ "events" : [
Expand Down
16 changes: 16 additions & 0 deletions docs/reference/migration/migrate_8_0.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,20 @@ coming[8.0.0]

* <<breaking_80_mappings_changes>>

[float]
=== Indices created before 7.0

Elasticsearch 8.0 can read indices created in version 7.0 or above. An
Elasticsearch 8.0 node will not start in the presence of indices created in a
version of Elasticsearch before 7.0.

[IMPORTANT]
.Reindex indices from Elasticsearch 6.x or before
=========================================
Indices created in Elasticsearch 6.x or before will need to be reindexed with
Elasticsearch 7.x in order to be readable by Elasticsearch 8.x.
=========================================

include::migrate_8_0/mappings.asciidoc[]
8 changes: 7 additions & 1 deletion docs/reference/migration/migrate_8_0/mappings.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,10 @@

The `nGram` and `edgeNGram` token filter names that have been deprecated since
version 6.4 have been removed. Both token filters should be used by their
alternative names `ngram` and `edge_ngram` instead.
alternative names `ngram` and `edge_ngram` instead.

[float]
==== Limiting the number of completion contexts

The number of completion contexts within a single completion field
has been limited to 10.
6 changes: 6 additions & 0 deletions docs/reference/query-dsl/intervals-query.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -151,8 +151,14 @@ Produces intervals that are contained by an interval from the filter rule
Produces intervals that do not contain an interval from the filter rule
`not_contained_by`::
Produces intervals that are not contained by an interval from the filter rule
`overlapping`::
Produces intervals that overlap with an interval from the filter rule
`not_overlapping`::
Produces intervals that do not overlap with an interval from the filter rule
`before`::
Produces intervals that appear before an interval from the filter rule
`after`::
Produces intervals that appear after an interval from the filter rule

[[interval-script-filter]]
==== Script filters
Expand Down
5 changes: 5 additions & 0 deletions docs/reference/rest-api/info.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,11 @@ Example response:
"available" : true,
"enabled" : true
},
"data_frame" : {
"description" : "Data Frame for the Elastic Stack",
"available" : true,
"enabled" : true
},
"graph" : {
"description" : "Graph Data Exploration for the Elastic Stack",
"available" : true,
Expand Down
4 changes: 2 additions & 2 deletions docs/reference/search/request/track-total-hits.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ of hits after a certain threshold.

When set to `true` the search response will always track the number of hits that
match the query accurately (e.g. `total.relation` will always be equal to `"eq"`
when `track_total_hits is set to true). Otherwise the `"total.relation"` returned
when `track_total_hits` is set to true). Otherwise the `"total.relation"` returned
in the `"total"` object in the search response determines how the `"total.value"`
should be interpreted. A value of `"gte"` means that the `"total.value"` is a
lower bound of the total hits that match the query and a value of `"eq"` indicates
Expand Down Expand Up @@ -178,4 +178,4 @@ GET twitter/_search
<1> The total number of hits is unknown.

Finally you can force an accurate count by setting `"track_total_hits"`
to `true` in the request.
to `true` in the request.
2 changes: 2 additions & 0 deletions docs/reference/search/suggesters/context-suggest.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ the field mapping.
NOTE: It is mandatory to provide a context when indexing and querying
a context enabled completion field.

NOTE: The maximum allowed number of completion field context mappings is 10.

The following defines types, each with two context mappings for a completion
field:

Expand Down
3 changes: 3 additions & 0 deletions libs/ssl-config/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
* under the License.
*/

apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'

dependencies {
compile "org.elasticsearch:elasticsearch-core:${version}"

Expand Down
Loading

0 comments on commit 8433480

Please sign in to comment.