Skip to content

Commit

Permalink
Merge pull request kubernetes#16069 from spowelljr/updateUbuntu22
Browse files Browse the repository at this point in the history
Kicbase: Update base image to Ubuntu 22.04
  • Loading branch information
spowelljr authored May 17, 2023
2 parents 68180a6 + 5b63431 commit 3631db3
Show file tree
Hide file tree
Showing 18 changed files with 66 additions and 68 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/update-ubuntu-version.yml
Original file line number Diff line number Diff line change
Expand Up @@ -37,16 +37,16 @@ jobs:
uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666
with:
token: ${{ secrets.MINIKUBE_BOT_PAT }}
commit-message: 'Kicbase: Bump ubuntu:focal from ${{ steps.bumpUbuntu.outputs.OLD_VERSION }} to ${{ steps.bumpUbuntu.outpus.NEW_VERSION }}'
commit-message: 'Kicbase: Bump ubuntu:jammy from ${{ steps.bumpUbuntu.outputs.OLD_VERSION }} to ${{ steps.bumpUbuntu.outputs.NEW_VERSION }}'
committer: minikube-bot <minikube-bot@google.com>
author: minikube-bot <minikube-bot@google.com>
branch: auto_bump_ubuntu_version
push-to-fork: minikube-bot/minikube
base: master
delete-branch: true
title: 'Kicbase: Bump ubuntu:focal from ${{ steps.bumpUbuntu.outputs.OLD_VERSION }} to ${{ steps.bumpUbuntu.outpus.NEW_VERSION }}'
title: 'Kicbase: Bump ubuntu:jammy from ${{ steps.bumpUbuntu.outputs.OLD_VERSION }} to ${{ steps.bumpUbuntu.outputs.NEW_VERSION }}'
body: |
The ubuntu:focal image released a new version
The ubuntu:jammy image released a new version
This PR was auto-generated by `make update-ubuntu-version` using [update-ubuntu-version.yml](https://github.com/kubernetes/minikube/tree/master/.github/workflows/update-ubuntu-version.yml) CI Workflow.
- uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410
Expand Down
63 changes: 30 additions & 33 deletions deploy/kicbase/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@
# https://systemd.io/CONTAINER_INTERFACE/


# this ARG needs to be global to use it in `FROM` & is updated for new versions of ubuntu:focal-*
ARG UBUNTU_FOCAL_IMAGE="ubuntu:focal-20230308"
# this ARG needs to be global to use it in `FROM` & is updated for new versions of ubuntu:jammy-*
ARG UBUNTU_JAMMY_IMAGE="ubuntu:jammy-20230425"
# multi-stage docker build so we can build auto-pause for arm64
FROM golang:1.20.4 as auto-pause
WORKDIR /src
Expand All @@ -37,16 +37,17 @@ ENV GOARCH=${TARGETARCH}
ARG PREBUILT_AUTO_PAUSE
RUN if [ "$PREBUILT_AUTO_PAUSE" != "true" ]; then cd ./cmd/auto-pause/ && go build -o auto-pause-${TARGETARCH}; fi

# start from ubuntu 20.04, this image is reasonably small as a starting point
# start from ubuntu 22.04, this image is reasonably small as a starting point
# for a kubernetes node image, it doesn't contain much we don't need
FROM ${UBUNTU_FOCAL_IMAGE} as kicbase
FROM ${UBUNTU_JAMMY_IMAGE} as kicbase

ARG BUILDKIT_VERSION="v0.11.6"
ARG FUSE_OVERLAYFS_VERSION="v1.7.1"
ARG CONTAINERD_FUSE_OVERLAYFS_VERSION="1.0.3"
ARG CRIO_VERSION="1.24"
ARG CRI_DOCKERD_VERSION="v0.3.1"
ARG CRI_DOCKERD_COMMIT="9a87d6ae274ecf0f23776920964d6484bd679282"
ARG CNI_PLUGINS_VERSION="v1.2.0"
ARG TARGETARCH

# copy in static files (configs, scripts)
Expand Down Expand Up @@ -144,10 +145,7 @@ RUN clean-install \
libglib2.0-0

# install docker
# use the bionic packages for arm32
RUN export ARCH=$(dpkg --print-architecture | sed 's/armhf/arm-v7/') && \
if [ "$ARCH" == "arm-v7" ]; then export DIST="bionic"; else export DIST="focal"; fi && \
sh -c "echo 'deb https://download.docker.com/linux/ubuntu ${DIST} stable' > /etc/apt/sources.list.d/docker.list" && \
RUN sh -c "echo 'deb https://download.docker.com/linux/ubuntu jammy stable' > /etc/apt/sources.list.d/docker.list" && \
curl -L https://download.docker.com/linux/ubuntu/gpg -o docker.key && \
apt-key add - < docker.key && \
clean-install docker-ce docker-ce-cli containerd.io docker-buildx-plugin
Expand All @@ -171,38 +169,41 @@ RUN export ARCH=$(dpkg --print-architecture | sed 's/ppc64el/ppc64le/' | sed 's/
&& chmod 755 /usr/local/bin/buildkitd \
&& systemctl enable buildkit.socket

# Install cri-o/podman dependencies:
# install podman
RUN clean-install podman && \
addgroup --system podman && \
mkdir -p /etc/systemd/system/podman.socket.d && \
printf "[Socket]\nSocketMode=0660\nSocketUser=root\nSocketGroup=podman\n" \
> /etc/systemd/system/podman.socket.d/override.conf && \
mkdir -p /etc/tmpfiles.d && \
echo "d /run/podman 0770 root podman" > /etc/tmpfiles.d/podman.conf && \
systemd-tmpfiles --create

# install cri-o dependencies:
RUN export ARCH=$(dpkg --print-architecture | sed 's/ppc64el/ppc64le/') && \
sh -c "echo 'deb https://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
curl -LO https://downloadcontent.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_20.04/Release.key && \
sh -c "echo 'deb https://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_22.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
curl -LO https://downloadcontent.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_22.04/Release.key && \
apt-key add - < Release.key && \
if [ "$ARCH" != "ppc64le" ]; then \
clean-install containers-common catatonit conmon containernetworking-plugins cri-tools podman-plugins crun; \
clean-install catatonit conmon cri-tools crun; \
else \
clean-install containers-common conmon containernetworking-plugins crun; \
clean-install conmon crun; \
fi

# install containernetworking-plugins
RUN export ARCH=$(dpkg --print-architecture | sed 's/ppc64el/ppc64le/' | sed 's/armhf/arm/') && \
curl -LO "https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-$ARCH-${CNI_PLUGINS_VERSION}.tgz" && \
mkdir -p /opt/cni/bin && \
tar -xf "cni-plugins-linux-$ARCH-${CNI_PLUGINS_VERSION}.tgz" -C /opt/cni/bin && \
rm "cni-plugins-linux-$ARCH-${CNI_PLUGINS_VERSION}.tgz"

# install cri-o based on https://github.com/cri-o/cri-o/blob/release-1.24/README.md#installing-cri-o
RUN export ARCH=$(dpkg --print-architecture | sed 's/ppc64el/ppc64le/' | sed 's/armhf/arm-v7/') && \
if [ "$ARCH" != "ppc64le" ] && [ "$ARCH" != "arm-v7" ]; then sh -c "echo 'deb https://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${CRIO_VERSION}/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:${CRIO_VERSION}.list" && \
curl -LO https://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${CRIO_VERSION}/xUbuntu_20.04/Release.key && \
if [ "$ARCH" != "ppc64le" ] && [ "$ARCH" != "arm-v7" ]; then sh -c "echo 'deb https://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${CRIO_VERSION}/xUbuntu_22.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:${CRIO_VERSION}.list" && \
curl -LO https://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${CRIO_VERSION}/xUbuntu_22.04/Release.key && \
apt-key add - < Release.key && \
clean-install cri-o cri-o-runc; fi

# install podman
RUN export ARCH=$(dpkg --print-architecture | sed 's/ppc64el/ppc64le/') && \
if [ "$ARCH" != "ppc64le" ]; then sh -c "echo 'deb http://downloadcontent.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
curl -LO https://downloadcontent.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_20.04/Release.key && \
apt-key add - < Release.key && \
clean-install podman && \
addgroup --system podman && \
mkdir -p /etc/systemd/system/podman.socket.d && \
printf "[Socket]\nSocketMode=0660\nSocketUser=root\nSocketGroup=podman\n" \
> /etc/systemd/system/podman.socket.d/override.conf && \
mkdir -p /etc/tmpfiles.d && \
echo "d /run/podman 0770 root podman" > /etc/tmpfiles.d/podman.conf && \
systemd-tmpfiles --create; fi

# install version.json
ARG VERSION_JSON
RUN echo "${VERSION_JSON}" > /version.json
Expand Down Expand Up @@ -234,10 +235,6 @@ RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
# minikube relies on /etc/hosts for control-plane discovery. This prevents nefarious DNS servers from breaking it.
RUN sed -ri 's/dns files/files dns/g' /etc/nsswitch.conf

# metacopy breaks crio on certain OS and isn't necessary for minikube
# https://github.com/kubernetes/minikube/issues/10520
RUN sed -ri 's/mountopt = "nodev,metacopy=on"/mountopt = "nodev"/g' /etc/containers/storage.conf

EXPOSE 22
# create docker user for minikube ssh. to match VM using "docker" as username
RUN adduser --ingroup docker --disabled-password --gecos '' docker
Expand Down
16 changes: 8 additions & 8 deletions hack/update/ubuntu_version/update_ubuntu_version.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,13 +49,13 @@ var (
schema = map[string]update.Item{
"deploy/kicbase/Dockerfile": {
Replace: map[string]string{
`UBUNTU_FOCAL_IMAGE=.*`: `UBUNTU_FOCAL_IMAGE="{{.LatestVersion}}"`,
`UBUNTU_JAMMY_IMAGE=.*`: `UBUNTU_JAMMY_IMAGE="{{.LatestVersion}}"`,
},
},
}
)

// Data holds latest Ubuntu focal version in semver format.
// Data holds latest Ubuntu jammy version in semver format.
type Data struct {
LatestVersion string
}
Expand All @@ -70,7 +70,7 @@ type Response struct {
func getLatestVersion() (string, error) {
resp, err := http.Get(dockerHubUbuntuBaseURL)
if err != nil {
return "", fmt.Errorf("unable to get Ubuntu focal's latest version: %v", err)
return "", fmt.Errorf("unable to get Ubuntu jammy's latest version: %v", err)
}
defer resp.Body.Close()

Expand All @@ -86,22 +86,22 @@ func getLatestVersion() (string, error) {
}

for _, i := range content.Results {
if strings.Contains(i.Name, "focal-") {
if strings.Contains(i.Name, "jammy-") {
return i.Name, nil
}
}

return "", fmt.Errorf("response from Docker Hub does not contain a latest focal image")
return "", fmt.Errorf("response from Docker Hub does not contain a latest jammy image")
}

func main() {
// get Ubuntu Focal latest version
// get Ubuntu Jammy latest version
latest, err := getLatestVersion()
if err != nil {
klog.Fatalf("Unable to find latest ubuntu:focal version: %v\n", err)
klog.Fatalf("Unable to find latest ubuntu:jammy version: %v\n", err)
}
data := Data{LatestVersion: fmt.Sprintf("ubuntu:%s", latest)}
klog.Infof("Ubuntu focal latest version: %s", latest)
klog.Infof("Ubuntu jammy latest version: %s", latest)

update.Apply(schema, data)
}
4 changes: 2 additions & 2 deletions pkg/drivers/kic/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,10 @@ import (

const (
// Version is the current version of kic
Version = "v0.0.39-1683246328-16426"
Version = "v0.0.39-1683580114-16069"

// SHA of the kic base image
baseImageSHA = "c063371f0e8933096fec0d2952100c31c4e17f2898a91dfbf036ee0048f9ee72"
baseImageSHA = "6c7bb1051911da0c2d7a414b8e1bebd8ab4c6015c82be5a2716c237af0c1a8ba"
// The name of the GCR kicbase repository
gcrRepo = "gcr.io/k8s-minikube/kicbase-builds"
// The name of the Dockerhub kicbase repository
Expand Down
2 changes: 1 addition & 1 deletion pkg/minikube/bootstrapper/images/images.go
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ func storageProvisioner(mirror string) string {
// src: https://github.com/kubernetes-sigs/kind/tree/master/images/kindnetd
func KindNet(repo string) string {
if repo == "" {
repo = "kindest"
repo = "docker.io/kindest"
}
return path.Join(repo, "kindnetd:v20230511-dc714da8")
}
Expand Down
1 change: 1 addition & 0 deletions pkg/minikube/cruntime/crio.go
Original file line number Diff line number Diff line change
Expand Up @@ -306,6 +306,7 @@ func (r *CRIO) BuildImage(src string, file string, tag string, push bool, env []
for _, opt := range opts {
args = append(args, "--"+opt)
}
args = append(args, "--cgroup-manager=cgroupfs")
c := exec.Command("sudo", args...)
e := os.Environ()
e = append(e, env...)
Expand Down
2 changes: 1 addition & 1 deletion site/content/en/docs/commands/start.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ minikube start [flags]
--apiserver-names strings A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine
--apiserver-port int The apiserver listening port (default 8443)
--auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true)
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase-builds:v0.0.39-1683246328-16426@sha256:c063371f0e8933096fec0d2952100c31c4e17f2898a91dfbf036ee0048f9ee72")
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase-builds:v0.0.39-1683580114-16069@sha256:6c7bb1051911da0c2d7a414b8e1bebd8ab4c6015c82be5a2716c237af0c1a8ba")
--binary-mirror string Location to fetch kubectl, kubelet, & kubeadm binaries from.
--cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
--cert-expiration duration Duration until minikube certificate expiration, defaults to three years (26280h). (default 26280h0m0s)
Expand Down
2 changes: 1 addition & 1 deletion test/integration/addons_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -446,7 +446,7 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string)
// Test from inside the cluster (`helm version` use pod.list permission.)
checkHelmTiller := func() error {

rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--", "version"))
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=docker.io/alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--", "version"))
if err != nil {
return err
}
Expand Down
20 changes: 10 additions & 10 deletions test/integration/functional_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ func tagAndLoadImage(ctx context.Context, t *testing.T, profile, taggedImage str
t.Fatalf("failed to setup test (tag image) : %v\n%s", err, rr.Output())
}

rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", taggedImage))
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", taggedImage, "--alsologtostderr"))
if err != nil {
t.Fatalf("loading image into minikube from daemon: %v\n%s", err, rr.Output())
}
Expand All @@ -256,7 +256,7 @@ func runImageList(ctx context.Context, t *testing.T, profile, testName, format,
t.Run(testName, func(t *testing.T) {
MaybeParallel(t)

rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "ls", "--format", format))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "ls", "--format", format, "--alsologtostderr"))
if err != nil {
t.Fatalf("listing image with minikube: %v\n%s", err, rr.Output())
}
Expand Down Expand Up @@ -310,7 +310,7 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {
newImage := fmt.Sprintf("localhost/my-image:%s", profile)

// try to build the new image with minikube
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "build", "-t", newImage, filepath.Join(*testdataDir, "build")))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "build", "-t", newImage, filepath.Join(*testdataDir, "build"), "--alsologtostderr"))
if err != nil {
t.Fatalf("building image with minikube: %v\n%s", err, rr.Output())
}
Expand Down Expand Up @@ -350,7 +350,7 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {

// docs: Make sure image loading from Docker daemon works by `minikube image load --daemon`
t.Run("ImageLoadDaemon", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", taggedImage))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", taggedImage, "--alsologtostderr"))
if err != nil {
t.Fatalf("loading image into minikube from daemon: %v\n%s", err, rr.Output())
}
Expand All @@ -360,7 +360,7 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {

// docs: Try to load image already loaded and make sure `minikube image load --daemon` works
t.Run("ImageReloadDaemon", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", taggedImage))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", taggedImage, "--alsologtostderr"))
if err != nil {
t.Fatalf("loading image into minikube from daemon: %v\n%s", err, rr.Output())
}
Expand All @@ -375,7 +375,7 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {

// docs: Make sure image saving works by `minikube image load --daemon`
t.Run("ImageSaveToFile", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "save", taggedImage, imagePath))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "save", taggedImage, imagePath, "--alsologtostderr"))
if err != nil {
t.Fatalf("saving image from minikube to file: %v\n%s", err, rr.Output())
}
Expand All @@ -387,7 +387,7 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {

// docs: Make sure image removal works by `minikube image rm`
t.Run("ImageRemove", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "rm", taggedImage))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "rm", taggedImage, "--alsologtostderr"))
if err != nil {
t.Fatalf("removing image from minikube: %v\n%s", err, rr.Output())
}
Expand All @@ -404,8 +404,8 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {

// docs: Make sure image loading from file works by `minikube image load`
t.Run("ImageLoadFromFile", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", imagePath))
if err != nil || rr.Stderr.String() != "" {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", imagePath, "--alsologtostderr"))
if err != nil || strings.Contains(rr.Output(), "failed pushing to: functional") {
t.Fatalf("loading image into minikube from file: %v\n%s", err, rr.Output())
}

Expand All @@ -419,7 +419,7 @@ func validateImageCommands(ctx context.Context, t *testing.T, profile string) {
t.Fatalf("failed to remove image from docker: %v\n%s", err, rr.Output())
}

rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "save", "--daemon", taggedImage))
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "save", "--daemon", taggedImage, "--alsologtostderr"))
if err != nil {
t.Fatalf("saving image from minikube to daemon: %v\n%s", err, rr.Output())
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ spec:
claimName: hpvc-restore
containers:
- name: task-pv-container
image: nginx
image: docker.io/nginx
ports:
- containerPort: 80
name: "http-server"
Expand Down
2 changes: 1 addition & 1 deletion test/integration/testdata/csi-hostpath-driver/pv-pod.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ spec:
claimName: hpvc
containers:
- name: task-pv-container
image: nginx
image: docker.io/nginx
ports:
- containerPort: 80
name: "http-server"
Expand Down
2 changes: 1 addition & 1 deletion test/integration/testdata/inaccel.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ metadata:
name: inaccel-vadd
spec:
containers:
- image: inaccel/vadd
- image: docker.io/inaccel/vadd
name: inaccel-vadd
resources:
limits:
Expand Down
2 changes: 1 addition & 1 deletion test/integration/testdata/mysql.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ spec:
app: mysql
spec:
containers:
- image: mysql:5.7
- image: docker.io/mysql:5.7
name: mysql
resources:
requests:
Expand Down
2 changes: 1 addition & 1 deletion test/integration/testdata/nginx-gvisor.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,4 @@ spec:
runtimeClassName: gvisor
containers:
- name: nginx
image: nginx
image: docker.io/nginx
2 changes: 1 addition & 1 deletion test/integration/testdata/nginx-pod-svc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ metadata:
spec:
containers:
- name: nginx
image: nginx:alpine
image: docker.io/nginx:alpine
ports:
- containerPort: 80
protocol: TCP
Expand Down
2 changes: 1 addition & 1 deletion test/integration/testdata/nginx-untrusted.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,4 @@ metadata:
spec:
containers:
- name: nginx
image: nginx
image: docker.io/nginx
2 changes: 1 addition & 1 deletion test/integration/testdata/storage-provisioner/pod.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: myfrontend
image: nginx
image: docker.io/nginx
volumeMounts:
- mountPath: "/tmp/mount"
name: mypd
Expand Down
2 changes: 1 addition & 1 deletion test/integration/testdata/testsvc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ metadata:
spec:
containers:
- name: nginx
image: nginx:alpine
image: docker.io/nginx:alpine
ports:
- containerPort: 80
protocol: TCP
Expand Down

0 comments on commit 3631db3

Please sign in to comment.