diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
index 52821a519dd02..7c92e0ff444dc 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
@@ -152,6 +152,7 @@ class ExecutorPodsAllocator(
       applicationId: String,
       schedulerBackend: KubernetesClusterSchedulerBackend,
       snapshots: Seq[ExecutorPodsSnapshot]): Unit = {
+    logDebug(s"Received ${snapshots.size} snapshots")
     val k8sKnownExecIds = snapshots.flatMap(_.executorPods.keys).distinct
     newlyCreatedExecutors --= k8sKnownExecIds
     schedulerKnownNewlyCreatedExecs --= k8sKnownExecIds
@@ -352,8 +353,12 @@ class ExecutorPodsAllocator(
       }
     }
 
+    // Try to request new executors only when there are remaining slots within the maximum
+    // number of pending pods and, when waiting for existing PVCs to be released, only after
+    // a new snapshot arrives
     val remainingSlotFromPendingPods = maxPendingPods - totalNotRunningPodCount
-    if (remainingSlotFromPendingPods > 0 && podsToAllocateWithRpId.size > 0) {
+    if (remainingSlotFromPendingPods > 0 && podsToAllocateWithRpId.size > 0 &&
+      !(snapshots.isEmpty && podAllocOnPVC && maxPVCs <= PVC_COUNTER.get())) {
       ExecutorPodsAllocator.splitSlots(podsToAllocateWithRpId, remainingSlotFromPendingPods)
         .foreach { case ((rpId, podCountForRpId, targetNum), sharedSlotFromPendingPods) =>
           val numMissingPodsForRpId = targetNum - podCountForRpId
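
For readers skimming the patch, the sketch below restates the new guard condition in isolation. It is not part of the patch: the object, method name, and scalar parameters are invented for illustration, while PVC_COUNTER, podAllocOnPVC, and maxPVCs mirror the allocator state referenced in the diff.

import java.util.concurrent.atomic.AtomicInteger

// Sketch only: models the guard added to ExecutorPodsAllocator, which defers requesting
// new executors when no fresh snapshot has arrived, PVC-oriented allocation is enabled,
// and the tracked PVC count has already reached the configured maximum.
object PvcGuardSketch {
  // Stand-in for the allocator's PVC counter (assumption for this sketch).
  val PVC_COUNTER = new AtomicInteger(0)

  def shouldRequestNewExecutors(
      remainingSlotFromPendingPods: Int,
      podsToAllocate: Int,
      snapshotsEmpty: Boolean,
      podAllocOnPVC: Boolean,
      maxPVCs: Int): Boolean = {
    remainingSlotFromPendingPods > 0 && podsToAllocate > 0 &&
      !(snapshotsEmpty && podAllocOnPVC && maxPVCs <= PVC_COUNTER.get())
  }

  def main(args: Array[String]): Unit = {
    PVC_COUNTER.set(2)
    // PVC limit (2) reached and no new snapshot: allocation is deferred until a snapshot
    // can report that an existing PVC was released.
    println(shouldRequestNewExecutors(5, 3, snapshotsEmpty = true, podAllocOnPVC = true, maxPVCs = 2))  // false
    // Once a new snapshot arrives, the guard no longer blocks allocation.
    println(shouldRequestNewExecutors(5, 3, snapshotsEmpty = false, podAllocOnPVC = true, maxPVCs = 2)) // true
  }
}

The added logDebug line in onNewSnapshots pairs with this guard: the snapshots.isEmpty check only makes sense if empty batches are common, and the debug log makes that visible when diagnosing stalled allocation.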