Merged
2 changes: 1 addition & 1 deletion Jenkinsfile
@@ -16,7 +16,7 @@ void createCluster(String CLUSTER_SUFFIX) {
--preemptible \
--zone=${region} \
--machine-type='n1-standard-4' \
--cluster-version='1.30' \
--cluster-version='1.31' \
--num-nodes=3 \
--labels='delete-cluster-after-hours=6' \
--disk-size=30 \
58 changes: 58 additions & 0 deletions e2e-tests/functions
@@ -115,6 +115,64 @@ remove_all_finalizers() {
done
}

wait_for_ready_containers() {
local pod_prefix="$1"
local target_count="$2"
local namespace="${NAMESPACE}"
local max_wait_seconds=300
local check_interval=5
local elapsed_time=0

if [[ -z "$pod_prefix" || -z "$target_count" || -z "$namespace" ]]; then
echo "Error: Missing arguments." >&2
echo "Usage: wait_for_ready_containers <pod_name_prefix> <target_ready_count> <namespace>" >&2
return 1
fi

echo "Waiting for pods starting with '$pod_prefix' in namespace '$namespace' to have $target_count ready containers (Max ${max_wait_seconds}s)..."

while [[ "$elapsed_time" -lt "$max_wait_seconds" ]]; do
local target_pods
# Get pods that match the prefix AND are running
target_pods=$(kubectl get pods -n "$namespace" --field-selector=status.phase=Running --output=json | \
jq -r ".items[] | select(.metadata.name | startswith(\"$pod_prefix\")) | .metadata.name")
# If no running pods match the prefix, something might be wrong, but we'll keep waiting.
if [[ -z "$target_pods" ]]; then
echo "No running pods found with prefix '$pod_prefix'. Waiting..."
sleep "$check_interval"
elapsed_time=$((elapsed_time + check_interval))
continue
fi

local ready_count=0
local total_matches=0

# Check each pod individually
for pod_name in $target_pods; do
total_matches=$((total_matches + 1))
current_ready=$(kubectl get pod "$pod_name" -n "$namespace" -o json 2>/dev/null | \
jq '.status.containerStatuses | map(select(.ready == true)) | length')

if [[ "$current_ready" -eq "$target_count" ]]; then
ready_count=$((ready_count + 1))
fi
done

if [[ "$ready_count" -eq "$total_matches" ]]; then
echo "Success: All $total_matches pods now have $target_count ready containers."
return 0
fi

echo "Current status: $ready_count of $total_matches pods have $target_count ready containers. Waiting ${check_interval}s..."

sleep "$check_interval"
elapsed_time=$((elapsed_time + check_interval))
done

echo "Error: Timeout reached! After ${max_wait_seconds} seconds, not all pods reached $target_count ready containers." >&2
return 1
Comment on lines +119 to +173
[shfmt] reported by reviewdog 🐶

Suggested change
local pod_prefix="$1"
local target_count="$2"
local namespace="${NAMESPACE}"
local max_wait_seconds=300
local check_interval=5
local elapsed_time=0
if [[ -z $pod_prefix || -z $target_count || -z $namespace ]]; then
echo "Error: Missing arguments." >&2
echo "Usage: wait_for_ready_containers <pod_name_prefix> <target_ready_count> <namespace>" >&2
return 1
fi
echo "Waiting for pods starting with '$pod_prefix' in namespace '$namespace' to have $target_count ready containers (Max ${max_wait_seconds}s)..."
while [[ $elapsed_time -lt $max_wait_seconds ]]; do
local target_pods
# Get pods that match the prefix AND are running
target_pods=$(kubectl get pods -n "$namespace" --field-selector=status.phase=Running --output=json \
| jq -r ".items[] | select(.metadata.name | startswith(\"$pod_prefix\")) | .metadata.name")
# If no running pods match the prefix, something might be wrong, but we'll keep waiting.
if [[ -z $target_pods ]]; then
echo "No running pods found with prefix '$pod_prefix'. Waiting..."
sleep "$check_interval"
elapsed_time=$((elapsed_time + check_interval))
continue
fi
local ready_count=0
local total_matches=0
# Check each pod individually
for pod_name in $target_pods; do
total_matches=$((total_matches + 1))
current_ready=$(kubectl get pod "$pod_name" -n "$namespace" -o json 2>/dev/null \
| jq '.status.containerStatuses | map(select(.ready == true)) | length')
if [[ $current_ready -eq $target_count ]]; then
ready_count=$((ready_count + 1))
fi
done
if [[ $ready_count -eq $total_matches ]]; then
echo "Success: All $total_matches pods now have $target_count ready containers."
return 0
fi
echo "Current status: $ready_count of $total_matches pods have $target_count ready containers. Waiting ${check_interval}s..."
sleep "$check_interval"
elapsed_time=$((elapsed_time + check_interval))
done
echo "Error: Timeout reached! After ${max_wait_seconds} seconds, not all pods reached $target_count ready containers." >&2
return 1

}

destroy_operator() {
kubectl -n "${OPERATOR_NS:-$NAMESPACE}" delete deployment percona-postgresql-operator --force --grace-period=0 || true
if [[ $OPERATOR_NS ]]; then
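For reference, a minimal sketch of how the new helper is intended to be called from a test step (mirroring the call added at the end of this PR; the NAMESPACE value below is an illustrative placeholder, not taken from the PR):

export NAMESPACE="kuttl-test-example"   # illustrative; the helper reads the target namespace from $NAMESPACE
# Block until every Running pod whose name starts with "some-name-instance1"
# reports 2 ready containers, polling every 5s for up to 300s; returns non-zero on timeout.
wait_for_ready_containers "some-name-instance1" 2 || exit 1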
2 changes: 1 addition & 1 deletion e2e-tests/kuttl.yaml
@@ -2,4 +2,4 @@ apiVersion: kuttl.dev/v1beta1
kind: TestSuite
testDirs:
- e2e-tests/tests
timeout: 180
timeout: 600
2 changes: 2 additions & 0 deletions
@@ -11,3 +11,5 @@ commands:
| yq '.metadata.annotations."pgv2.percona.com/authorizeBackupRemoval"="true"' \
| yq '.spec.backups.enabled=false' \
| kubectl -n "${NAMESPACE}" apply -f -

wait_for_ready_containers "some-name-instance1" 2