4 changes: 2 additions & 2 deletions build-tools/coverage.gradle
@@ -51,8 +51,8 @@ jacocoTestReport {
sourceDirectories.from = "src/main/kotlin"
classDirectories.from = sourceSets.main.output
reports {
-html.enabled = true // human readable
-xml.enabled = true // for coverlay
+html.required = true // human readable
+xml.required = true // for coverlay
}
}

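Context for the coverage.gradle change: Gradle 7 deprecated the boolean `enabled` flags on the Jacoco report containers in favor of the `required` Property, and Gradle 8 removes the old names. A minimal Gradle Kotlin DSL sketch of the equivalent configuration (assumes the jacoco plugin is applied; an illustration, not this repo's Groovy script):

    // build.gradle.kts sketch: the same report toggles against the Gradle 7+/8 API.
    tasks.jacocoTestReport {
        reports {
            html.required.set(true) // human readable
            xml.required.set(true)  // for coverlay
        }
    }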
12 changes: 5 additions & 7 deletions build-tools/pkgbuild.gradle
@@ -3,7 +3,7 @@
* SPDX-License-Identifier: Apache-2.0
*/

-apply plugin: 'nebula.ospackage'
+apply plugin: 'com.netflix.nebula.ospackage'

// This is afterEvaluate because the bundlePlugin ZIP task is updated afterEvaluate and changes the ZIP name to match the plugin name
afterEvaluate {
@@ -44,9 +44,8 @@ afterEvaluate {
task renameRpm(type: Copy) {
from("$buildDir/distributions")
into("$buildDir/distributions")
-include archiveName
-rename archiveName, "${packageName}-${version}.rpm"
-doLast { delete file("$buildDir/distributions/$archiveName") }
+rename "$archiveFileName", "${packageName}-${archiveVersion}.rpm"
+doLast { delete file("$buildDir/distributions/$archiveFileName") }
}
}
buildDeb {
@@ -56,9 +55,8 @@ afterEvaluate {
task renameDeb(type: Copy) {
from("$buildDir/distributions")
into("$buildDir/distributions")
-include archiveName
-rename archiveName, "${packageName}-${version}.deb"
-doLast { delete file("$buildDir/distributions/$archiveName") }
+rename "$archiveFileName", "${packageName}-${archiveVersion}.deb"
+doLast { delete file("$buildDir/distributions/$archiveFileName") }
}
}
}
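The renameRpm/renameDeb changes follow the Gradle 8 removal of the String-typed archiveName, version, and classifier properties on AbstractArchiveTask; the lazy archiveFileName, archiveVersion, and archiveClassifier providers replace them. A small Gradle Kotlin DSL sketch of the renamed properties on a core Zip task (task and file names here are made up for illustration, not this repo's nebula RPM/DEB tasks):

    // Sketch only: demonstrates the renamed archive properties on a plain Zip task.
    tasks.register<Zip>("packageDocs") {
        from(layout.buildDirectory.dir("docs"))
        archiveBaseName.set("index-management-docs")
        archiveVersion.set("1.0.0")
        archiveClassifier.set("sources") // replaces the removed `classifier = 'sources'`
        // archiveFileName resolves to index-management-docs-1.0.0-sources.zip
    }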
2 changes: 1 addition & 1 deletion build.gradle
@@ -85,7 +85,7 @@ buildscript {
}

plugins {
-id 'nebula.ospackage' version "8.3.0"
+id 'com.netflix.nebula.ospackage' version "11.0.0"
id "com.dorongold.task-tree" version "2.1.1"
}

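The ospackage plugin id changes in pkgbuild.gradle and build.gradle go together: the Nebula plugins were re-published under the com.netflix.nebula namespace, and the 11.x releases under that id are the ones intended for Gradle 8. As a Gradle Kotlin DSL sketch, the same plugins block would read:

    plugins {
        id("com.netflix.nebula.ospackage") version "11.0.0"
        id("com.dorongold.task-tree") version "2.1.1"
    }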
2 changes: 1 addition & 1 deletion gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.1.1-bin.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
17 changes: 8 additions & 9 deletions spi/build.gradle
@@ -25,13 +25,13 @@ ext {

jacoco {
toolVersion = '0.8.7'
-reportsDir = file("$buildDir/JacocoReport")
+reportsDirectory = file("$buildDir/JacocoReport")
}

jacocoTestReport {
reports {
-xml.enabled false
-csv.enabled false
+xml.required = false
+csv.required = false
html.destination file("${buildDir}/jacoco/")
}
}
@@ -70,7 +70,7 @@ idea.module {
}

task sourcesJar(type: Jar, dependsOn: classes) {
-classifier = 'sources'
+archiveClassifier = 'sources'
from sourceSets.main.allSource
}

@@ -84,12 +84,11 @@ test {
}

task integTest(type: RestIntegTestTask) {
-description 'Run integ test with opensearch test framework'
-group 'verification'
-systemProperty 'tests.security.manager', 'false'
-dependsOn test
+description = "Run integration tests against integTest cluster"
+testClassesDirs = sourceSets.test.output.classesDirs
+classpath = sourceSets.test.runtimeClasspath
}
-check.dependsOn integTest
+tasks.named("check").configure { dependsOn(integTest) }

testClusters.javaRestTest {
testDistribution = 'INTEG_TEST'
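The spi/build.gradle hunk also replaces the eager `check.dependsOn integTest` wiring with `tasks.named(...)`, which avoids realizing the check task at configuration time, and points the integ test task at the test source set outputs. A Kotlin DSL sketch of the same wiring (assumes a task named integTest exists, as in the Groovy script above):

    tasks.named("check") {
        // Lazy equivalent of `check.dependsOn integTest`; check is only configured when needed.
        dependsOn("integTest")
    }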
@@ -176,7 +176,7 @@ class IndexStateManagementHistory(
clusterStateRequest,
object : ActionListener<ClusterStateResponse> {
override fun onResponse(clusterStateResponse: ClusterStateResponse) {
-if (!clusterStateResponse.state.metadata.indices.isEmpty) {
+if (clusterStateResponse.state.metadata.indices.isNotEmpty()) {
val indicesToDelete = getIndicesToDelete(clusterStateResponse)
logger.info("Deleting old history indices viz $indicesToDelete")
deleteAllOldHistoryIndices(indicesToDelete)
@@ -199,7 +199,7 @@ class IndexStateManagementHistory(
val creationTime = indexMetaData.creationDate

if ((Instant.now().toEpochMilli() - creationTime) > historyRetentionPeriod.millis) {
-val alias = indexMetaData.aliases.firstOrNull { IndexManagementIndices.HISTORY_WRITE_INDEX_ALIAS == it.value.alias }
+val alias = indexMetaData.aliases.entries.firstOrNull { IndexManagementIndices.HISTORY_WRITE_INDEX_ALIAS == it.value.alias }
if (alias != null && historyEnabled) {
// If index has write alias and history is enable, don't delete the index.
continue
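The Kotlin changes from here on (ManagedIndexRunner, MetadataService, the shrink and transition steps, the rollup services, and the tests) all track the same OpenSearch core change: cluster-state collections such as Metadata.indices, aliases, rolloverInfos, and GetMappingsResponse.mappings are now exposed as plain java.util.Map rather than ImmutableOpenMap. From Kotlin, indexed access on a Map returns a nullable value and iteration goes through entries, which is why the diff adds `!!`, `?.`, `entries`, and `isNotEmpty()` at the call sites. A self-contained Kotlin sketch of the pattern (plain maps, no OpenSearch types involved):

    fun main() {
        // Stand-in for Metadata.indices after the API change: a plain Map keyed by index name.
        val indices: Map<String, Int> = mapOf("logs-000001" to 3)

        // Indexed access returns a nullable value...
        val shards: Int? = indices["logs-000001"]
        println(shards)                                  // 3

        // ...so call sites either assert non-null (as the production code does) or handle null.
        println(indices["missing"] ?: "index not found") // index not found

        // Iteration uses Map.entries instead of ImmutableOpenMap cursors / values().
        println(indices.entries.map { it.key })          // [logs-000001]
    }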
@@ -846,7 +846,7 @@ object ManagedIndexRunner :

val response: ClusterStateResponse = client.admin().cluster().suspendUntil { state(clusterStateRequest, it) }

-indexMetaData = response.state.metadata.indices.firstOrNull()?.value
+indexMetaData = response.state.metadata.indices.entries.firstOrNull()?.value
} catch (e: Exception) {
logger.error("Failed to get IndexMetaData from cluster manager cluster state for index=$index", e)
}
@@ -111,7 +111,7 @@ class MetadataService(
val indexUuidMap = mutableMapOf<IndexUuid, IndexName>()
clusterStateManagedIndexMetadata.forEach { (indexName, metadata) ->
val indexMetadata = indicesMetadata[indexName]
-val currentIndexUuid = indexMetadata.indexUUID
+val currentIndexUuid = indexMetadata!!.indexUUID
if (currentIndexUuid != metadata?.indexUuid) {
corruptManagedIndices.add(indexMetadata.index)
} else {
@@ -199,7 +199,7 @@ fun XContentBuilder.buildMetadata(name: String, metadata: ToXContentFragment, pa

// Get the oldest rollover time or null if index was never rolled over
fun IndexMetadata.getOldestRolloverTime(): Instant? {
-return this.rolloverInfos.values()
+return this.rolloverInfos.entries
.map { it.value.time }
.minOrNull() // oldest should be min as its epoch time
?.let { Instant.ofEpochMilli(it) }
@@ -84,7 +84,7 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name,
val (statsStore, statsDocs, shardStats) = getIndexStats(indexName, client) ?: return this
val indexSize = statsStore.sizeInBytes
// Get stats of current and target shards
-val numOriginalShards = context.clusterService.state().metadata.indices[indexName].numberOfShards
+val numOriginalShards = context.clusterService.state().metadata.indices[indexName]!!.numberOfShards
val numTargetShards = getNumTargetShards(numOriginalShards, indexSize)

if (shouldFailTooManyDocuments(statsDocs, numTargetShards)) return this
@@ -215,7 +215,7 @@ class AttemptMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name,
private fun shouldFailUnsafe(clusterService: ClusterService, indexName: String): Boolean {
// If forceUnsafe is set and is true, then we don't even need to check the number of replicas
if (action.forceUnsafe == true) return false
-val numReplicas = clusterService.state().metadata.indices[indexName].numberOfReplicas
+val numReplicas = clusterService.state().metadata.indices.get(indexName)!!.numberOfReplicas
val shouldFailForceUnsafeCheck = numReplicas == 0
if (shouldFailForceUnsafeCheck) {
logger.info(UNSAFE_FAILURE_MESSAGE)
@@ -33,7 +33,7 @@ class WaitForMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name,
val numShardsInSync = getNumShardsInSync(shardStats, context.clusterService.state(), indexName)
val nodeToMoveOnto = localShrinkActionProperties.nodeName
val numShardsOnNode = getNumShardsWithCopyOnNode(shardStats, context.clusterService.state(), nodeToMoveOnto)
-val numPrimaryShards = context.clusterService.state().metadata.indices[indexName].numberOfShards
+val numPrimaryShards = context.clusterService.state().metadata.indices[indexName]!!.numberOfShards

// If a copy of each shard is on the node, and all shards are in sync, move on
if (numShardsOnNode >= numPrimaryShards && numShardsInSync >= numPrimaryShards) {
@@ -49,16 +49,16 @@ class WaitForMoveShardsStep(private val action: ShrinkAction) : ShrinkStep(name,

// Returns the number of shard IDs where all primary and replicas are in sync
private fun getNumShardsInSync(shardStats: Array<ShardStats>, state: ClusterState, indexName: String): Int {
-val numReplicas = state.metadata.indices[indexName].numberOfReplicas
-val inSyncAllocations = state.metadata.indices[indexName].inSyncAllocationIds
+val numReplicas = state.metadata.indices[indexName]!!.numberOfReplicas
+val inSyncAllocations = state.metadata.indices[indexName]!!.inSyncAllocationIds
var numShardsInSync = 0
for (shard: ShardStats in shardStats) {
val routingInfo = shard.shardRouting
// Only check primaries so that we only check once for each shardID
if (routingInfo.primary()) {
// All shards must be in sync as it isn't known which shard (replica or primary) will be
// moved to the target node and used in the shrink.
-if (inSyncAllocations[routingInfo.id].size == (numReplicas + 1)) {
+if (inSyncAllocations[routingInfo.id]!!.size == (numReplicas + 1)) {
numShardsInSync++
}
}
@@ -54,7 +54,7 @@ class WaitForShrinkStep(private val action: ShrinkAction) : ShrinkStep(name, tru

private suspend fun shrinkNotDone(targetIndex: String, targetNumShards: Int, client: Client, clusterService: ClusterService): Boolean {
val numPrimaryShardsStarted = getNumPrimaryShardsStarted(client, targetIndex)
-val numPrimaryShards = clusterService.state().metadata.indices[targetIndex].numberOfShards
+val numPrimaryShards = clusterService.state().metadata.indices[targetIndex]!!.numberOfShards
return numPrimaryShards != targetNumShards || numPrimaryShardsStarted != targetNumShards
}

@@ -269,7 +269,7 @@ class TransportExplainAction @Inject constructor(
clusterStateRequest,
object : ActionListener<ClusterStateResponse> {
override fun onResponse(response: ClusterStateResponse) {
-val clusterStateIndexMetadatas = response.state.metadata.indices.associate { it.key to it.value }
+val clusterStateIndexMetadatas = response.state.metadata.indices.entries.associate { it.key to it.value }
getMetadataMap(clusterStateIndexMetadatas, threadContext)
}

@@ -343,7 +343,7 @@ class RollupMapperService(
return RollupJobValidationResult.Failure(getMappingsResult.message, getMappingsResult.cause)
}

-val indexMapping: MappingMetadata = res.mappings[targetIndexResolvedName]
+val indexMapping: MappingMetadata = res.mappings[targetIndexResolvedName]!!

return if (((indexMapping.sourceAsMap?.get(_META) as Map<*, *>?)?.get(ROLLUPS) as Map<*, *>?)?.containsKey(rollup.id) == true) {
RollupJobValidationResult.Valid
@@ -59,7 +59,7 @@ object RollupFieldValueExpressionResolver {
open fun hasAlias(index: String): Boolean {
val aliases = this.clusterService.state().metadata().indices.get(index)?.aliases
if (aliases != null) {
-return aliases.size() > 0
+return aliases.isNotEmpty()
}
return false
}
@@ -134,7 +134,7 @@ class IndexUtils {
actionListener: ActionListener<AcknowledgedResponse>
) {
if (clusterState.metadata.indices.containsKey(index)) {
-if (shouldUpdateIndex(clusterState.metadata.indices[index], schemaVersion)) {
+if (shouldUpdateIndex(clusterState.metadata.indices[index]!!, schemaVersion)) {
val putMappingRequest: PutMappingRequest = PutMappingRequest(index).source(mapping, XContentType.JSON)
client.putMapping(putMappingRequest, actionListener)
} else {
@@ -113,7 +113,7 @@ abstract class IndexStateManagementIntegTestCase : OpenSearchIntegTestCase() {
return client().admin().cluster().prepareState()
.setIndices(indexName)
.setMetadata(true).get()
-.state.metadata.indices[indexName]
+.state.metadata.indices[indexName]!!
}

// reuse utility fun from RestTestCase
@@ -22,7 +22,6 @@ import org.opensearch.client.ClusterAdminClient
import org.opensearch.cluster.ClusterState
import org.opensearch.cluster.metadata.Metadata
import org.opensearch.cluster.service.ClusterService
-import org.opensearch.common.collect.ImmutableOpenMap
import org.opensearch.indexmanagement.IndexManagementIndices
import org.opensearch.test.OpenSearchTestCase
import kotlin.test.assertFailsWith
@@ -40,7 +39,7 @@ class MetadataServiceTests : OpenSearchTestCase() {
fun setup() {
whenever(clusterService.state()).doReturn(clusterState)
whenever(clusterState.metadata).doReturn(metadata)
-whenever(metadata.indices).doReturn(ImmutableOpenMap.of())
+whenever(metadata.indices).doReturn(mutableMapOf())
}

fun `test config index not exists`() = runBlocking {
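In the unit tests the same API change shows up as stubbing: mocks that used to return ImmutableOpenMap instances now return ordinary Kotlin maps, which satisfy the Map-typed getters directly. A minimal sketch of the pattern with mockito-kotlin (package names assume the org.mockito.kotlin artifact; this module's own mockito-kotlin import prefix may differ):

    import org.mockito.kotlin.doReturn
    import org.mockito.kotlin.mock
    import org.mockito.kotlin.whenever
    import org.opensearch.cluster.metadata.IndexMetadata
    import org.opensearch.cluster.metadata.Metadata

    // Sketch only: a helper that stubs an empty indices map the way the updated tests do.
    fun stubMetadataWithNoIndices(): Metadata {
        val metadata: Metadata = mock()
        whenever(metadata.indices).doReturn(mutableMapOf<String, IndexMetadata>())
        return metadata
    }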
@@ -23,7 +23,6 @@ import org.opensearch.cluster.ClusterState
import org.opensearch.cluster.metadata.IndexMetadata
import org.opensearch.cluster.metadata.Metadata
import org.opensearch.cluster.service.ClusterService
-import org.opensearch.common.collect.ImmutableOpenMap
import org.opensearch.common.settings.ClusterSettings
import org.opensearch.common.settings.Settings
import org.opensearch.index.shard.DocsStats
@@ -50,7 +49,7 @@ class AttemptTransitionStepTests : OpenSearchTestCase() {
private val indexUUID: String = "indexUuid"
@Suppress("UNCHECKED_CAST")
private val indexMetadata: IndexMetadata = mock {
-on { rolloverInfos } doReturn ImmutableOpenMap.builder<String, RolloverInfo>().build()
+on { rolloverInfos } doReturn mutableMapOf<String, RolloverInfo>()
on { indexUUID } doReturn indexUUID
}
private val metadata: Metadata = mock {
@@ -21,7 +21,6 @@ import org.opensearch.client.IndicesAdminClient
import org.opensearch.cluster.metadata.IndexNameExpressionResolver
import org.opensearch.cluster.metadata.MappingMetadata
import org.opensearch.cluster.service.ClusterService
-import org.opensearch.common.collect.ImmutableOpenMap
import org.opensearch.common.xcontent.XContentType
import org.opensearch.indexmanagement.rollup.model.RollupJobValidationResult
import org.opensearch.test.OpenSearchTestCase
@@ -292,16 +291,14 @@ class RollupMapperServiceTests : OpenSearchTestCase() {

private fun getMappingResponse(indexName: String, emptyMapping: Boolean = false): GetMappingsResponse {
val mappings = if (emptyMapping) {
-ImmutableOpenMap.Builder<String, MappingMetadata>().build()
+mutableMapOf()
} else {
val mappingSourceMap = createParser(
XContentType.JSON.xContent(),
javaClass.classLoader.getResource("mappings/kibana-sample-data.json").readText()
).map()
val mappingMetadata = MappingMetadata("_doc", mappingSourceMap) // it seems it still expects a type, i.e. _doc now
-ImmutableOpenMap.Builder<String, MappingMetadata>()
-.fPut(indexName, mappingMetadata)
-.build()
+mutableMapOf(indexName to mappingMetadata)
}

return GetMappingsResponse(mappings)
@@ -14,17 +14,16 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse
import org.opensearch.action.index.IndexResponse
import org.opensearch.cluster.SnapshotsInProgress
import org.opensearch.common.UUIDs
-import org.opensearch.common.collect.ImmutableOpenMap
import org.opensearch.common.unit.TimeValue
import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.xcontent.XContentType
import org.opensearch.core.xcontent.NamedXContentRegistry
import org.opensearch.core.xcontent.ToXContent
-import org.opensearch.common.xcontent.XContentFactory
import org.opensearch.core.xcontent.XContentParser
-import org.opensearch.common.xcontent.XContentType
-import org.opensearch.indexmanagement.opensearchapi.string
import org.opensearch.index.seqno.SequenceNumbers
import org.opensearch.indexmanagement.indexstatemanagement.randomChannel
+import org.opensearch.indexmanagement.opensearchapi.string
import org.opensearch.indexmanagement.opensearchapi.toMap
import org.opensearch.indexmanagement.randomCronSchedule
import org.opensearch.indexmanagement.randomInstant
@@ -39,9 +38,9 @@ import org.opensearch.snapshots.Snapshot
import org.opensearch.snapshots.SnapshotId
import org.opensearch.snapshots.SnapshotInfo
import org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength
+import org.opensearch.test.OpenSearchTestCase.randomBoolean
+import org.opensearch.test.OpenSearchTestCase.randomIntBetween
import org.opensearch.test.OpenSearchTestCase.randomNonNegativeLong
-import org.opensearch.test.OpenSearchTestCase.randomIntBetween
-import org.opensearch.test.OpenSearchTestCase.randomBoolean
import org.opensearch.test.rest.OpenSearchRestTestCase
import java.time.Instant
import java.time.temporal.ChronoUnit
@@ -242,7 +241,7 @@ fun mockInProgressSnapshotInfo(
emptyList(),
randomNonNegativeLong(),
randomNonNegativeLong(),
-ImmutableOpenMap.of(),
+mapOf(),
"",
mapOf("sm_policy" to "daily-snapshot"),
Version.CURRENT,