Found 16 cores, limiting parallelism with --test.parallel=9
=== RUN   TestDownloadOnly
=== RUN   TestDownloadOnly/crio
=== RUN   TestDownloadOnly/crio/v1.13.0
    aaa_download_only_test.go:65: (dbg) Run: ./minikube-linux-amd64 start --download-only -p crio-20200724213609-14997 --force --alsologtostderr --kubernetes-version=v1.13.0 --container-runtime=crio --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:65: (dbg) Done: ./minikube-linux-amd64 start --download-only -p crio-20200724213609-14997 --force --alsologtostderr --kubernetes-version=v1.13.0 --container-runtime=crio --vm-driver=docker --base-image=local/kicbase:-snapshot: (17.152795608s)
=== RUN   TestDownloadOnly/crio/v1.18.3
    aaa_download_only_test.go:67: (dbg) Run: ./minikube-linux-amd64 start --download-only -p crio-20200724213609-14997 --force --alsologtostderr --kubernetes-version=v1.18.3 --container-runtime=crio --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:67: (dbg) Done: ./minikube-linux-amd64 start --download-only -p crio-20200724213609-14997 --force --alsologtostderr --kubernetes-version=v1.18.3 --container-runtime=crio --vm-driver=docker --base-image=local/kicbase:-snapshot: (6.484641159s)
=== RUN   TestDownloadOnly/crio/v1.18.4-rc.0
    aaa_download_only_test.go:67: (dbg) Run: ./minikube-linux-amd64 start --download-only -p crio-20200724213609-14997 --force --alsologtostderr --kubernetes-version=v1.18.4-rc.0 --container-runtime=crio --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:67: (dbg) Done: ./minikube-linux-amd64 start --download-only -p crio-20200724213609-14997 --force --alsologtostderr --kubernetes-version=v1.18.4-rc.0 --container-runtime=crio --vm-driver=docker --base-image=local/kicbase:-snapshot: (7.784927538s)
=== RUN   TestDownloadOnly/crio/DeleteAll
    aaa_download_only_test.go:133: (dbg) Run: ./minikube-linux-amd64 delete --all
=== RUN   TestDownloadOnly/crio/DeleteAlwaysSucceeds
    aaa_download_only_test.go:145: (dbg) Run: ./minikube-linux-amd64 delete -p crio-20200724213609-14997
=== CONT  TestDownloadOnly/crio
    helpers_test.go:170: Cleaning up "crio-20200724213609-14997" profile ...
    helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p crio-20200724213609-14997
=== RUN   TestDownloadOnly/docker
=== RUN   TestDownloadOnly/docker/v1.13.0
    aaa_download_only_test.go:65: (dbg) Run: ./minikube-linux-amd64 start --download-only -p docker-20200724213642-14997 --force --alsologtostderr --kubernetes-version=v1.13.0 --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:65: (dbg) Done: ./minikube-linux-amd64 start --download-only -p docker-20200724213642-14997 --force --alsologtostderr --kubernetes-version=v1.13.0 --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot: (4.790501089s)
=== RUN   TestDownloadOnly/docker/v1.18.3
    aaa_download_only_test.go:67: (dbg) Run: ./minikube-linux-amd64 start --download-only -p docker-20200724213642-14997 --force --alsologtostderr --kubernetes-version=v1.18.3 --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:67: (dbg) Done: ./minikube-linux-amd64 start --download-only -p docker-20200724213642-14997 --force --alsologtostderr --kubernetes-version=v1.18.3 --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot: (10.324596816s)
=== RUN   TestDownloadOnly/docker/v1.18.4-rc.0
    aaa_download_only_test.go:67: (dbg) Run: ./minikube-linux-amd64 start --download-only -p docker-20200724213642-14997 --force --alsologtostderr --kubernetes-version=v1.18.4-rc.0 --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:67: (dbg) Done: ./minikube-linux-amd64 start --download-only -p docker-20200724213642-14997 --force --alsologtostderr --kubernetes-version=v1.18.4-rc.0 --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot: (13.927686326s)
=== RUN   TestDownloadOnly/docker/DeleteAll
    aaa_download_only_test.go:133: (dbg) Run: ./minikube-linux-amd64 delete --all
=== RUN   TestDownloadOnly/docker/DeleteAlwaysSucceeds
    aaa_download_only_test.go:145: (dbg) Run: ./minikube-linux-amd64 delete -p docker-20200724213642-14997
=== CONT  TestDownloadOnly/docker
    helpers_test.go:170: Cleaning up "docker-20200724213642-14997" profile ...
    helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p docker-20200724213642-14997
=== RUN   TestDownloadOnly/containerd
=== RUN   TestDownloadOnly/containerd/v1.13.0
    aaa_download_only_test.go:65: (dbg) Run: ./minikube-linux-amd64 start --download-only -p containerd-20200724213712-14997 --force --alsologtostderr --kubernetes-version=v1.13.0 --container-runtime=containerd --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:65: (dbg) Done: ./minikube-linux-amd64 start --download-only -p containerd-20200724213712-14997 --force --alsologtostderr --kubernetes-version=v1.13.0 --container-runtime=containerd --vm-driver=docker --base-image=local/kicbase:-snapshot: (8.646512627s)
=== RUN   TestDownloadOnly/containerd/v1.18.3
    aaa_download_only_test.go:67: (dbg) Run: ./minikube-linux-amd64 start --download-only -p containerd-20200724213712-14997 --force --alsologtostderr --kubernetes-version=v1.18.3 --container-runtime=containerd --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:67: (dbg) Done: ./minikube-linux-amd64 start --download-only -p containerd-20200724213712-14997 --force --alsologtostderr --kubernetes-version=v1.18.3 --container-runtime=containerd --vm-driver=docker --base-image=local/kicbase:-snapshot: (12.886204949s)
=== RUN   TestDownloadOnly/containerd/v1.18.4-rc.0
    aaa_download_only_test.go:67: (dbg) Run: ./minikube-linux-amd64 start --download-only -p containerd-20200724213712-14997 --force --alsologtostderr --kubernetes-version=v1.18.4-rc.0 --container-runtime=containerd --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:67: (dbg) Done: ./minikube-linux-amd64 start --download-only -p containerd-20200724213712-14997 --force --alsologtostderr --kubernetes-version=v1.18.4-rc.0 --container-runtime=containerd --vm-driver=docker --base-image=local/kicbase:-snapshot: (7.090360064s)
=== RUN   TestDownloadOnly/containerd/DeleteAll
    aaa_download_only_test.go:133: (dbg) Run: ./minikube-linux-amd64 delete --all
=== RUN   TestDownloadOnly/containerd/DeleteAlwaysSucceeds
    aaa_download_only_test.go:145: (dbg) Run: ./minikube-linux-amd64 delete -p containerd-20200724213712-14997
=== CONT  TestDownloadOnly/containerd
    helpers_test.go:170: Cleaning up "containerd-20200724213712-14997" profile ...
    helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p containerd-20200724213712-14997
--- PASS: TestDownloadOnly (91.79s)
    --- PASS: TestDownloadOnly/crio (32.32s)
        --- PASS: TestDownloadOnly/crio/v1.13.0 (17.15s)
        --- PASS: TestDownloadOnly/crio/v1.18.3 (6.48s)
        --- PASS: TestDownloadOnly/crio/v1.18.4-rc.0 (7.79s)
        --- PASS: TestDownloadOnly/crio/DeleteAll (0.45s)
        --- PASS: TestDownloadOnly/crio/DeleteAlwaysSucceeds (0.23s)
    --- PASS: TestDownloadOnly/docker (29.93s)
        --- PASS: TestDownloadOnly/docker/v1.13.0 (4.79s)
        --- PASS: TestDownloadOnly/docker/v1.18.3 (10.32s)
        --- PASS: TestDownloadOnly/docker/v1.18.4-rc.0 (13.93s)
        --- PASS: TestDownloadOnly/docker/DeleteAll (0.42s)
        --- PASS: TestDownloadOnly/docker/DeleteAlwaysSucceeds (0.24s)
    --- PASS: TestDownloadOnly/containerd (29.53s)
        --- PASS: TestDownloadOnly/containerd/v1.13.0 (8.65s)
        --- PASS: TestDownloadOnly/containerd/v1.18.3 (12.89s)
        --- PASS: TestDownloadOnly/containerd/v1.18.4-rc.0 (7.09s)
        --- PASS: TestDownloadOnly/containerd/DeleteAll (0.45s)
        --- PASS: TestDownloadOnly/containerd/DeleteAlwaysSucceeds (0.23s)
=== RUN   TestDownloadOnlyKic
    aaa_download_only_test.go:168: (dbg) Run: ./minikube-linux-amd64 start --download-only -p download-docker-20200724213741-14997 --force --alsologtostderr --vm-driver=docker --base-image=local/kicbase:-snapshot
    aaa_download_only_test.go:168: (dbg) Done: ./minikube-linux-amd64 start --download-only -p download-docker-20200724213741-14997 --force --alsologtostderr --vm-driver=docker --base-image=local/kicbase:-snapshot: (10.683632932s)
    helpers_test.go:170: Cleaning up "download-docker-20200724213741-14997" profile ...
    helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p download-docker-20200724213741-14997
--- PASS: TestDownloadOnlyKic (12.16s)
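The TestOffline group that follows starts full clusters while outbound HTTP(S) traffic is cut off; every fetch in the logs below dies with "proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout", which indicates the environment routes traffic through a proxy at an unroutable address. A minimal Go sketch of that simulation technique (the binary and flags are taken from the log, the proxy address matches the errors below, but the exact mechanism this CI uses is an assumption):

    package main

    import (
        "os"
        "os/exec"
    )

    func main() {
        // Point the standard proxy variables at an address that can never
        // answer, so every network fetch times out instead of succeeding.
        // 172.16.1.1:1 is the address seen in the proxyconnect errors below.
        // The profile name "offline-test" is hypothetical.
        cmd := exec.Command("./minikube-linux-amd64", "start", "-p", "offline-test",
            "--vm-driver=docker", "--memory=2000", "--wait=true")
        cmd.Env = append(os.Environ(),
            "HTTP_PROXY=http://172.16.1.1:1",
            "HTTPS_PROXY=http://172.16.1.1:1",
        )
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        _ = cmd.Run() // the offline test expects this to still succeed from cache
    }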
=== RUN   TestOffline
=== RUN   TestOffline/group
=== RUN   TestOffline/group/docker
=== PAUSE TestOffline/group/docker
=== RUN   TestOffline/group/crio
=== PAUSE TestOffline/group/crio
=== RUN   TestOffline/group/containerd
=== PAUSE TestOffline/group/containerd
=== CONT  TestOffline/group/docker
=== CONT  TestOffline/group/containerd
=== CONT  TestOffline/group/crio
=== CONT  TestOffline/group/docker
    aab_offline_test.go:53: (dbg) Run: ./minikube-linux-amd64 start -p offline-docker-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime docker --vm-driver=docker --base-image=local/kicbase:-snapshot
=== CONT  TestOffline/group/containerd
    aab_offline_test.go:53: (dbg) Run: ./minikube-linux-amd64 start -p offline-containerd-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime containerd --vm-driver=docker --base-image=local/kicbase:-snapshot
=== CONT  TestOffline/group/crio
    aab_offline_test.go:53: (dbg) Run: ./minikube-linux-amd64 start -p offline-crio-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime crio --vm-driver=docker --base-image=local/kicbase:-snapshot
=== CONT  TestOffline/group/containerd
    aab_offline_test.go:53: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p offline-containerd-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime containerd --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 70 (8m16.968933967s)
-- stdout --
* [offline-containerd-20200724213753-14997] minikube v1.12.1 on Ubuntu 20.04
  - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
  - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome
* Using the docker driver based on user configuration
* Starting control plane node offline-containerd-20200724213753-14997 in cluster offline-containerd-20200724213753-14997
* Pulling base image ...
* Creating docker container (CPUs=2, Memory=2000MB) ...
* docker "offline-containerd-20200724213753-14997" container is missing, will recreate.
* Creating docker container (CPUs=2, Memory=2000MB) ...
-- /stdout --
** stderr **
I0724 21:37:53.898789 17331 out.go:188] Setting JSON to false
I0724 21:37:53.900362 17331 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":312,"bootTime":1595626361,"procs":334,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"}
I0724 21:37:53.900928 17331 start.go:111] virtualization: kvm host
I0724 21:37:53.915117 17331 notify.go:125] Checking for updates...
I0724 21:37:53.915470 17331 driver.go:287] Setting default libvirt URI to qemu:///system
I0724 21:37:53.969380 17331 docker.go:87] docker version: linux-19.03.8
I0724 21:37:53.978419 17331 start.go:217] selected driver: docker
I0724 21:37:53.978426 17331 start.go:623] validating driver "docker" against <nil>
I0724 21:37:53.978441 17331 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error:<nil> Fix: Doc:}
I0724 21:37:53.978499 17331 cli_runner.go:109] Run: docker system info --format "{{json .}}"
I0724 21:37:54.031550 17331 start_flags.go:223] no existing cluster config was found, will generate one from the flags
I0724 21:37:54.031909 17331 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true]
I0724 21:37:54.031944 17331 cni.go:74] Creating CNI manager for ""
I0724 21:37:54.031952 17331 cni.go:105] "docker" driver + containerd runtime found, recommending kindnet
I0724 21:37:54.031967 17331 start_flags.go:340] Found "CNI" CNI - setting NetworkPlugin=cni
I0724 21:37:54.031976 17331 start_flags.go:345] config: {Name:offline-containerd-20200724213753-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:offline-containerd-20200724213753-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]}
I0724 21:37:54.094362 17331 cache.go:117] Beginning downloading kic base image for docker with containerd
I0724 21:37:54.099615 17331 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime containerd
I0724 21:37:54.099655 17331 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4
I0724 21:37:54.099670 17331 cache.go:51] Caching tarball of preloaded images
I0724 21:37:54.099682 17331 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0724 21:37:54.099691 17331 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on containerd
I0724 21:37:54.099753 17331 cache.go:137] Downloading local/kicbase:-snapshot to local daemon
I0724 21:37:54.099772 17331 image.go:140] Writing local/kicbase:-snapshot to local daemon
I0724 21:37:54.100555 17331 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/offline-containerd-20200724213753-14997/config.json ...
I0724 21:37:54.100903 17331 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/offline-containerd-20200724213753-14997/config.json: {Name:mka418d28693a7c94608a7a08ccc99ec90ee2f8d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
W0724 21:38:23.916199 17331 notify.go:56] Error getting json from minikube version url: error with http GET for endpoint https://storage.googleapis.com/minikube/releases.json: Get "https://storage.googleapis.com/minikube/releases.json": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout
I0724 21:40:28.316093 17331 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: Get "https://index.docker.io/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout
I0724 21:40:28.316186 17331 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon
I0724 21:40:28.316195 17331 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon
I0724 21:43:02.392618 17331 cache.go:151] failed to download kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438, will try fallback image if available: getting remote image: Get "https://index.docker.io/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout
I0724 21:43:02.392686 17331 cache.go:137] Downloading docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10 to local daemon
I0724 21:43:02.392692 17331 image.go:140] Writing docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10 to local daemon
I0724 21:45:36.671937 17331 cache.go:151] failed to download docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10, will try fallback image if available: getting remote image: Get "https://docker.pkg.github.com/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout
E0724 21:45:36.671992 17331 cache.go:172] Error downloading kic artifacts: failed to download kic base image or any fallback image
I0724 21:45:36.672296 17331 cache.go:178] Successfully downloaded all kic artifacts
I0724 21:45:36.672372 17331 start.go:241] acquiring machines lock for offline-containerd-20200724213753-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:<nil>}
I0724 21:45:36.672493 17331 start.go:245] acquired machines lock for "offline-containerd-20200724213753-14997" in 98.007µs
I0724 21:45:36.672525 17331 start.go:85] Provisioning new machine with config: &{Name:offline-containerd-20200724213753-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:offline-containerd-20200724213753-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}
I0724 21:45:36.672589 17331 start.go:122] createHost starting for "" (driver="docker")
I0724 21:45:36.706180 17331 start.go:158] libmachine.API.Create for "offline-containerd-20200724213753-14997" (driver="docker")
I0724 21:45:36.706222 17331 client.go:161] LocalClient.Create starting
I0724 21:45:36.706282 17331 main.go:115] libmachine: Creating CA: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem
I0724 21:45:37.132936 17331 main.go:115] libmachine: Creating client certificate: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem
I0724 21:45:37.284819 17331 cli_runner.go:109] Run: docker ps -a --format {{.Names}}
I0724 21:45:37.332188 17331 cli_runner.go:109] Run: docker volume create offline-containerd-20200724213753-14997 --label name.minikube.sigs.k8s.io=offline-containerd-20200724213753-14997 --label created_by.minikube.sigs.k8s.io=true
I0724 21:45:37.380374 17331 oci.go:101] Successfully created a docker volume offline-containerd-20200724213753-14997
I0724 21:45:37.380476 17331 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib
I0724 21:45:37.423020 17331 client.go:164] LocalClient.Create took 716.776029ms
I0724 21:45:39.423209 17331 start.go:125] duration metric: createHost completed in 2.750605042s
I0724 21:45:39.423252 17331 start.go:76] releasing machines lock for "offline-containerd-20200724213753-14997", held for 2.750742752s
I0724 21:45:39.424199 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:39.480130 17331 delete.go:81] Unable to get host status for offline-containerd-20200724213753-14997, assuming it has already been deleted: state: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
! StartHost failed, but will try again: creating host: create: creating: setting up container node: preparing volume for offline-containerd-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:

stderr:
docker: invalid reference format.
See 'docker run --help'.
I0724 21:45:44.480429 17331 start.go:241] acquiring machines lock for offline-containerd-20200724213753-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:<nil>}
I0724 21:45:44.480640 17331 start.go:245] acquired machines lock for "offline-containerd-20200724213753-14997" in 162.211µs
I0724 21:45:44.480676 17331 start.go:89] Skipping create...Using existing machine configuration
I0724 21:45:44.480683 17331 fix.go:53] fixHost starting:
I0724 21:45:44.480973 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:44.527043 17331 fix.go:105] recreateIfNeeded on offline-containerd-20200724213753-14997: state= err=unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:44.527075 17331 fix.go:110] machineExists: false. err=machine does not exist
I0724 21:45:44.534966 17331 delete.go:123] DEMOLISHING offline-containerd-20200724213753-14997 ...
I0724 21:45:44.535049 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
W0724 21:45:44.577643 17331 stop.go:72] unable to get state: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:44.577675 17331 delete.go:128] stophost failed (probably ok): ssh power off: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:44.578085 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:44.622121 17331 delete.go:81] Unable to get host status for offline-containerd-20200724213753-14997, assuming it has already been deleted: state: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:44.622187 17331 cli_runner.go:109] Run: docker container inspect -f {{.Id}} offline-containerd-20200724213753-14997
I0724 21:45:44.665916 17331 kic.go:274] could not find the container offline-containerd-20200724213753-14997 to remove it. will try anyways
I0724 21:45:44.665982 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
W0724 21:45:44.709719 17331 oci.go:82] error getting container status, will try to delete anyways: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:44.709792 17331 cli_runner.go:109] Run: docker exec --privileged -t offline-containerd-20200724213753-14997 /bin/bash -c "sudo init 0"
I0724 21:45:44.753138 17331 oci.go:568] error shutdown offline-containerd-20200724213753-14997: docker exec --privileged -t offline-containerd-20200724213753-14997 /bin/bash -c "sudo init 0": exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:45.753401 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:45.799620 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:45.799644 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:45.799667 17331 retry.go:30] will retry after 357.131936ms: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:46.157156 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:46.203175 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:46.203201 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:46.203219 17331 retry.go:30] will retry after 660.492892ms: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:46.864071 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:46.912684 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:46.912714 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:46.912742 17331 retry.go:30] will retry after 920.315446ms: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:47.833451 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:47.880423 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:47.880443 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:47.880463 17331 retry.go:30] will retry after 1.635001613s: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:49.515851 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:49.561072 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:49.561098 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:49.561119 17331 retry.go:30] will retry after 1.982055195s: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:51.543435 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:51.590427 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:51.590459 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:51.590486 17331 retry.go:30] will retry after 3.011308614s: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:54.602191 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:45:54.648042 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:45:54.648064 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:45:54.648084 17331 retry.go:30] will retry after 6.715255694s: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:46:01.363783 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:46:01.409186 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:46:01.409211 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:46:01.409230 17331 retry.go:30] will retry after 6.138576273s: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:46:07.548200 17331 cli_runner.go:109] Run: docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}
I0724 21:46:07.596871 17331 oci.go:580] temporary error verifying shutdown: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:46:07.596913 17331 oci.go:582] temporary error: container offline-containerd-20200724213753-14997 status is but expect it to be exited
I0724 21:46:07.596953 17331 oci.go:86] couldn't shut down offline-containerd-20200724213753-14997 (might be okay): verify shutdown: couldn't verify cointainer is exited. %v: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
I0724 21:46:07.597041 17331 cli_runner.go:109] Run: docker rm -f -v offline-containerd-20200724213753-14997
W0724 21:46:07.641232 17331 delete.go:138] delete failed (probably ok)
I0724 21:46:07.641261 17331 fix.go:117] Sleeping 1 second for extra luck!
I0724 21:46:08.641427 17331 start.go:122] createHost starting for "" (driver="docker")
I0724 21:46:08.672426 17331 start.go:158] libmachine.API.Create for "offline-containerd-20200724213753-14997" (driver="docker")
I0724 21:46:08.672497 17331 client.go:161] LocalClient.Create starting
I0724 21:46:08.672587 17331 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem
I0724 21:46:08.672652 17331 main.go:115] libmachine: Decoding PEM data...
I0724 21:46:08.672695 17331 main.go:115] libmachine: Parsing certificate...
I0724 21:46:08.672894 17331 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem
I0724 21:46:08.672935 17331 main.go:115] libmachine: Decoding PEM data...
I0724 21:46:08.672964 17331 main.go:115] libmachine: Parsing certificate...
I0724 21:46:08.673371 17331 cli_runner.go:109] Run: docker ps -a --format {{.Names}}
I0724 21:46:08.720511 17331 cli_runner.go:109] Run: docker volume create offline-containerd-20200724213753-14997 --label name.minikube.sigs.k8s.io=offline-containerd-20200724213753-14997 --label created_by.minikube.sigs.k8s.io=true
I0724 21:46:08.762684 17331 oci.go:101] Successfully created a docker volume offline-containerd-20200724213753-14997
I0724 21:46:08.762750 17331 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib
I0724 21:46:08.805922 17331 client.go:164] LocalClient.Create took 133.402594ms
I0724 21:46:10.806309 17331 start.go:125] duration metric: createHost completed in 2.164816635s
I0724 21:46:10.806363 17331 fix.go:55] fixHost completed within 26.32567952s
I0724 21:46:10.806373 17331 start.go:76] releasing machines lock for "offline-containerd-20200724213753-14997", held for 26.325711821s
* Failed to start docker container. "minikube start -p offline-containerd-20200724213753-14997" may fix it: recreate: creating host: create: creating: setting up container node: preparing volume for offline-containerd-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:

stderr:
docker: invalid reference format.
See 'docker run --help'.
I0724 21:46:10.806646 17331 exit.go:58] WithError(error provisioning host)=Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-containerd-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:

stderr:
docker: invalid reference format.
See 'docker run --help'.
called from:
goroutine 1 [running]:
runtime/debug.Stack(0x0, 0x0, 0x100000000000000)
	/home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d
k8s.io/minikube/pkg/minikube/exit.WithError(0x1bad79a, 0x17, 0x1ebf200, 0xc000ae52e0)
	/home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34
k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc000464600, 0x2, 0xc)
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:171 +0xbe2
github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc000464540, 0xc, 0xc, 0x2cd0820, 0xc000464540)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc0008d4550)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349
github.com/spf13/cobra.(*Command).Execute(...)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887
k8s.io/minikube/cmd/minikube/cmd.Execute()
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c
main.main()
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f
W0724 21:46:10.807059 17331 out.go:249] error provisioning host: Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-containerd-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:

stderr:
docker: invalid reference format.
See 'docker run --help'.
* X error provisioning host: Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-containerd-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-containerd-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:

stderr:
docker: invalid reference format.
See 'docker run --help'.
*
* minikube is exiting due to an error. If the above message is not useful, open an issue:
  - https://github.com/kubernetes/minikube/issues/new/choose
** /stderr **
    aab_offline_test.go:56: ./minikube-linux-amd64 start -p offline-containerd-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime containerd --vm-driver=docker --base-image=local/kicbase:-snapshot failed: exit status 70
    panic.go:617: *** TestOffline/group/containerd FAILED at 2020-07-24 21:46:10.812673571 +0000 UTC m=+600.937038056
    helpers_test.go:215: -----------------------post-mortem--------------------------------
    helpers_test.go:223: ======> post-mortem[TestOffline/group/containerd]: docker inspect <======
    helpers_test.go:224: (dbg) Run: docker inspect offline-containerd-20200724213753-14997
    helpers_test.go:228: (dbg) docker inspect offline-containerd-20200724213753-14997:
-- stdout --
[
    {
        "CreatedAt": "2020-07-24T21:45:37Z",
        "Driver": "local",
        "Labels": {
            "created_by.minikube.sigs.k8s.io": "true",
            "name.minikube.sigs.k8s.io": "offline-containerd-20200724213753-14997"
        },
        "Mountpoint": "/var/lib/docker/volumes/offline-containerd-20200724213753-14997/_data",
        "Name": "offline-containerd-20200724213753-14997",
        "Options": {},
        "Scope": "local"
    }
]
-- /stdout --
    helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p offline-containerd-20200724213753-14997 -n offline-containerd-20200724213753-14997
    helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p offline-containerd-20200724213753-14997 -n offline-containerd-20200724213753-14997: exit status 7 (104.984415ms)
-- stdout --
Nonexistent
-- /stdout --
** stderr **
E0724 21:46:10.963524 18374 status.go:118] status error: host: state: unknown state "offline-containerd-20200724213753-14997": docker container inspect offline-containerd-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:

stderr:
Error: No such container: offline-containerd-20200724213753-14997
** /stderr **
    helpers_test.go:232: status error: exit status 7 (may be ok)
    helpers_test.go:234: "offline-containerd-20200724213753-14997" host is not running, skipping log retrieval (state="Nonexistent")
    helpers_test.go:170: Cleaning up "offline-containerd-20200724213753-14997" profile ...
    helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p offline-containerd-20200724213753-14997
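Before the container creation ever failed, the stderr above also recorded the kic base-image fallback chain: cache.go tries local/kicbase:-snapshot, then kicbase/stable:v0.0.10@sha256:..., then docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10, and only logs "Error downloading kic artifacts" after all three time out; note it then still prints "Successfully downloaded all kic artifacts" and proceeds with the broken reference. A first-success-wins loop of that shape might look like the following sketch (helper names are hypothetical, not minikube's actual code):

    package main

    import (
        "errors"
        "fmt"
    )

    // downloadToDaemon stands in for the real image pull; here it always
    // fails, the way every pull fails in the offline logs above.
    func downloadToDaemon(ref string) error {
        return fmt.Errorf("getting remote image %q: proxyconnect timeout", ref)
    }

    // downloadWithFallback mirrors the cache.go:137/cache.go:151 pattern in
    // the log: try each candidate in order, keep the first that succeeds.
    func downloadWithFallback(refs []string) error {
        for _, ref := range refs {
            if err := downloadToDaemon(ref); err != nil {
                fmt.Printf("failed to download %s, will try fallback image if available: %v\n", ref, err)
                continue
            }
            return nil
        }
        return errors.New("failed to download kic base image or any fallback image")
    }

    func main() {
        refs := []string{
            "local/kicbase:-snapshot",
            "kicbase/stable:v0.0.10",
            "docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10",
        }
        if err := downloadWithFallback(refs); err != nil {
            fmt.Println("Error downloading kic artifacts:", err)
        }
    }

The docker-runtime run below fails the same way; it simply reports later because it spent 24 seconds waiting behind the containerd run on the shared machines lock.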
-- /stdout -- ** stderr ** I0724 21:37:53.897269 17328 out.go:188] Setting JSON to false I0724 21:37:53.900041 17328 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":312,"bootTime":1595626361,"procs":334,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 21:37:53.901067 17328 start.go:111] virtualization: kvm host I0724 21:37:53.915114 17328 notify.go:125] Checking for updates... I0724 21:37:53.915516 17328 driver.go:287] Setting default libvirt URI to qemu:///system I0724 21:37:53.966348 17328 docker.go:87] docker version: linux-19.03.8 I0724 21:37:53.978406 17328 start.go:217] selected driver: docker I0724 21:37:53.978414 17328 start.go:623] validating driver "docker" against I0724 21:37:53.978427 17328 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 21:37:53.978500 17328 cli_runner.go:109] Run: docker system info --format "{{json .}}" I0724 21:37:54.034421 17328 start_flags.go:223] no existing cluster config was found, will generate one from the flags I0724 21:37:54.034687 17328 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 21:37:54.034713 17328 cni.go:74] Creating CNI manager for "" I0724 21:37:54.034718 17328 cni.go:117] CNI unnecessary in this configuration, recommending no CNI I0724 21:37:54.034724 17328 start_flags.go:345] config: {Name:offline-docker-20200724213753-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:offline-docker-20200724213753-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 21:37:54.098063 17328 cache.go:117] Beginning downloading kic base image for docker with docker I0724 21:37:54.104143 17328 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 21:37:54.104223 17328 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 21:37:54.104247 17328 cache.go:51] Caching tarball of preloaded images I0724 21:37:54.104258 17328 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download I0724 21:37:54.104263 17328 
cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on docker I0724 21:37:54.104407 17328 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 21:37:54.104432 17328 image.go:140] Writing local/kicbase:-snapshot to local daemon I0724 21:37:54.104538 17328 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/offline-docker-20200724213753-14997/config.json ... I0724 21:37:54.104603 17328 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/offline-docker-20200724213753-14997/config.json: {Name:mk2450e81aeef840d8df6b717e9d7670fca225df Clock:{} Delay:500ms Timeout:1m0s Cancel:} W0724 21:38:23.916100 17328 notify.go:56] Error getting json from minikube version url: error with http GET for endpoint https://storage.googleapis.com/minikube/releases.json: Get "https://storage.googleapis.com/minikube/releases.json": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout I0724 21:40:28.319501 17328 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: Get "https://index.docker.io/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout I0724 21:40:28.319578 17328 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 21:40:28.319586 17328 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 21:43:02.395913 17328 cache.go:151] failed to download kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438, will try fallback image if available: getting remote image: Get "https://index.docker.io/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout I0724 21:43:02.395984 17328 cache.go:137] Downloading docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10 to local daemon I0724 21:43:02.396003 17328 image.go:140] Writing docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10 to local daemon I0724 21:45:36.675065 17328 cache.go:151] failed to download docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10, will try fallback image if available: getting remote image: Get "https://docker.pkg.github.com/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout E0724 21:45:36.675146 17328 cache.go:172] Error downloading kic artifacts: failed to download kic base image or any fallback image I0724 21:45:36.675369 17328 cache.go:178] Successfully downloaded all kic artifacts I0724 21:45:36.675415 17328 start.go:241] acquiring machines lock for offline-docker-20200724213753-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 21:45:39.423504 17328 start.go:245] acquired machines lock for "offline-docker-20200724213753-14997" in 2.748036364s I0724 21:45:39.423572 17328 start.go:85] Provisioning new machine with config: &{Name:offline-docker-20200724213753-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] 
NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:offline-docker-20200724213753-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true} I0724 21:45:39.423669 17328 start.go:122] createHost starting for "" (driver="docker") I0724 21:45:39.451138 17328 start.go:158] libmachine.API.Create for "offline-docker-20200724213753-14997" (driver="docker") I0724 21:45:39.451188 17328 client.go:161] LocalClient.Create starting I0724 21:45:39.451257 17328 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 21:45:39.451302 17328 main.go:115] libmachine: Decoding PEM data... I0724 21:45:39.451324 17328 main.go:115] libmachine: Parsing certificate... I0724 21:45:39.451447 17328 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 21:45:39.451475 17328 main.go:115] libmachine: Decoding PEM data... I0724 21:45:39.451487 17328 main.go:115] libmachine: Parsing certificate... I0724 21:45:39.451842 17328 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 21:45:39.497125 17328 cli_runner.go:109] Run: docker volume create offline-docker-20200724213753-14997 --label name.minikube.sigs.k8s.io=offline-docker-20200724213753-14997 --label created_by.minikube.sigs.k8s.io=true I0724 21:45:39.547553 17328 oci.go:101] Successfully created a docker volume offline-docker-20200724213753-14997 I0724 21:45:39.547623 17328 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib I0724 21:45:39.591104 17328 client.go:164] LocalClient.Create took 139.898233ms I0724 21:45:41.591467 17328 start.go:125] duration metric: createHost completed in 2.16778261s I0724 21:45:41.591511 17328 start.go:76] releasing machines lock for "offline-docker-20200724213753-14997", held for 2.167966423s I0724 21:45:41.592388 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:45:41.638625 17328 delete.go:81] Unable to get host status for offline-docker-20200724213753-14997, assuming it has already been deleted: state: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 ! StartHost failed, but will try again: creating host: create: creating: setting up container node: preparing volume for offline-docker-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125 stdout: stderr: docker: invalid reference format. 
See 'docker run --help'. I0724 21:45:46.639140 17328 start.go:241] acquiring machines lock for offline-docker-20200724213753-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 21:46:10.806548 17328 start.go:245] acquired machines lock for "offline-docker-20200724213753-14997" in 24.167344867s I0724 21:46:10.806615 17328 start.go:89] Skipping create...Using existing machine configuration I0724 21:46:10.806632 17328 fix.go:53] fixHost starting: I0724 21:46:10.807065 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:10.853652 17328 fix.go:105] recreateIfNeeded on offline-docker-20200724213753-14997: state= err=unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:10.853680 17328 fix.go:110] machineExists: false. err=machine does not exist I0724 21:46:10.870033 17328 delete.go:123] DEMOLISHING offline-docker-20200724213753-14997 ... I0724 21:46:10.870113 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} W0724 21:46:10.916929 17328 stop.go:72] unable to get state: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:10.916962 17328 delete.go:128] stophost failed (probably ok): ssh power off: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:10.917374 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:10.967084 17328 delete.go:81] Unable to get host status for offline-docker-20200724213753-14997, assuming it has already been deleted: state: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:10.967167 17328 cli_runner.go:109] Run: docker container inspect -f {{.Id}} offline-docker-20200724213753-14997 I0724 21:46:11.011787 17328 kic.go:274] could not find the container offline-docker-20200724213753-14997 to remove it. 
will try anyways I0724 21:46:11.011844 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} W0724 21:46:11.055672 17328 oci.go:82] error getting container status, will try to delete anyways: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:11.055754 17328 cli_runner.go:109] Run: docker exec --privileged -t offline-docker-20200724213753-14997 /bin/bash -c "sudo init 0" I0724 21:46:11.099274 17328 oci.go:568] error shutdown offline-docker-20200724213753-14997: docker exec --privileged -t offline-docker-20200724213753-14997 /bin/bash -c "sudo init 0": exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:12.099586 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:12.146289 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:12.146317 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:12.146349 17328 retry.go:30] will retry after 357.131936ms: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:12.503910 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:12.554014 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:12.554046 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:12.554065 17328 retry.go:30] will retry after 660.492892ms: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:13.214820 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:13.262207 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:13.262230 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:13.262249 17328 retry.go:30] will retry after 920.315446ms: couldn't verify cointainer is exited. 
%v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:14.182898 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:14.226575 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:14.226617 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:14.226644 17328 retry.go:30] will retry after 1.635001613s: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:15.862098 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:15.907702 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:15.907732 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:15.907752 17328 retry.go:30] will retry after 1.982055195s: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:17.890182 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:17.935916 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:17.935936 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:17.935954 17328 retry.go:30] will retry after 3.011308614s: couldn't verify cointainer is exited. 
%v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:20.947654 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:20.996731 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:20.996751 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:20.996769 17328 retry.go:30] will retry after 6.715255694s: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:27.712433 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:27.762441 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:27.762469 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:27.762491 17328 retry.go:30] will retry after 6.138576273s: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:33.901372 17328 cli_runner.go:109] Run: docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}} I0724 21:46:33.948385 17328 oci.go:580] temporary error verifying shutdown: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:33.948406 17328 oci.go:582] temporary error: container offline-docker-20200724213753-14997 status is but expect it to be exited I0724 21:46:33.948459 17328 oci.go:86] couldn't shut down offline-docker-20200724213753-14997 (might be okay): verify shutdown: couldn't verify cointainer is exited. %v: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-docker-20200724213753-14997 I0724 21:46:33.948540 17328 cli_runner.go:109] Run: docker rm -f -v offline-docker-20200724213753-14997 W0724 21:46:33.994445 17328 delete.go:138] delete failed (probably ok) I0724 21:46:33.994468 17328 fix.go:117] Sleeping 1 second for extra luck! 
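Note: every failure in this test group traces back to a single malformed string. minikube preflights the kic base image by running "docker run --rm --entrypoint /usr/bin/test -v <profile-volume>:/var local/kicbase:-snapshot -d /var/lib" (quoted repeatedly in this log), and the reference local/kicbase:-snapshot is invalid on its face: a Docker tag must begin with an alphanumeric character or underscore, so the tag "-snapshot" is rejected with "invalid reference format" (exit status 125) before any container exists. The tag reads as if a version was templated into something like local/kicbase:<version>-snapshot with <version> left empty. That also explains why the "DEMOLISHING" cleanup above never finds a container to inspect: only the volume was ever created. A minimal sketch reproducing the parse failure, assuming the github.com/docker/distribution/reference package (the parser Docker itself uses):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	for _, img := range []string{
		"local/kicbase:-snapshot",        // tag starts with '-': rejected
		"local/kicbase:v0.0.10-snapshot", // well-formed tag, for comparison
	} {
		if _, err := reference.ParseNormalizedNamed(img); err != nil {
			fmt.Printf("%s: %v\n", img, err) // prints "invalid reference format"
		} else {
			fmt.Printf("%s: ok\n", img)
		}
	}
}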
I0724 21:46:34.994706 17328 start.go:122] createHost starting for "" (driver="docker") I0724 21:46:35.017749 17328 start.go:158] libmachine.API.Create for "offline-docker-20200724213753-14997" (driver="docker") I0724 21:46:35.017810 17328 client.go:161] LocalClient.Create starting I0724 21:46:35.017902 17328 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 21:46:35.017967 17328 main.go:115] libmachine: Decoding PEM data... I0724 21:46:35.018003 17328 main.go:115] libmachine: Parsing certificate... I0724 21:46:35.018199 17328 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 21:46:35.018243 17328 main.go:115] libmachine: Decoding PEM data... I0724 21:46:35.018271 17328 main.go:115] libmachine: Parsing certificate... I0724 21:46:35.018706 17328 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 21:46:35.063214 17328 cli_runner.go:109] Run: docker volume create offline-docker-20200724213753-14997 --label name.minikube.sigs.k8s.io=offline-docker-20200724213753-14997 --label created_by.minikube.sigs.k8s.io=true I0724 21:46:35.109805 17328 oci.go:101] Successfully created a docker volume offline-docker-20200724213753-14997 I0724 21:46:35.109866 17328 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib I0724 21:46:35.152187 17328 client.go:164] LocalClient.Create took 134.350519ms I0724 21:46:37.152444 17328 start.go:125] duration metric: createHost completed in 2.157689958s I0724 21:46:37.152497 17328 fix.go:55] fixHost completed within 26.345867083s I0724 21:46:37.152511 17328 start.go:76] releasing machines lock for "offline-docker-20200724213753-14997", held for 26.345913885s * Failed to start docker container. "minikube start -p offline-docker-20200724213753-14997" may fix it: recreate: creating host: create: creating: setting up container node: preparing volume for offline-docker-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125 stdout: stderr: docker: invalid reference format. See 'docker run --help'. I0724 21:46:37.152724 17328 exit.go:58] WithError(error provisioning host)=Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-docker-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125 stdout: stderr: docker: invalid reference format. See 'docker run --help'. 
called from:
goroutine 1 [running]:
runtime/debug.Stack(0x0, 0x0, 0x100000000000000)
    /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d
k8s.io/minikube/pkg/minikube/exit.WithError(0x1bad79a, 0x17, 0x1ebf200, 0xc00000ee40)
    /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34
k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc00040ac00, 0x2, 0xc)
    /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:171 +0xbe2
github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc00040ab40, 0xc, 0xc, 0x2cd0820, 0xc00040ab40)
    /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc000042a50)
    /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349
github.com/spf13/cobra.(*Command).Execute(...)
    /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887
k8s.io/minikube/cmd/minikube/cmd.Execute()
    /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c
main.main()
    /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f
W0724 21:46:37.153050 17328 out.go:249] error provisioning host: Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-docker-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:
stderr:
docker: invalid reference format.
See 'docker run --help'.
* X error provisioning host: Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-docker-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-docker-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:
stderr:
docker: invalid reference format.
See 'docker run --help'.
*
* minikube is exiting due to an error.
If the above message is not useful, open an issue:
- https://github.com/kubernetes/minikube/issues/new/choose
** /stderr **
aab_offline_test.go:56: ./minikube-linux-amd64 start -p offline-docker-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime docker --vm-driver=docker --base-image=local/kicbase:-snapshot failed: exit status 70
panic.go:617: *** TestOffline/group/docker FAILED at 2020-07-24 21:46:37.157768998 +0000 UTC m=+627.282133483
helpers_test.go:215: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestOffline/group/docker]: docker inspect <======
helpers_test.go:224: (dbg) Run: docker inspect offline-docker-20200724213753-14997
helpers_test.go:228: (dbg) docker inspect offline-docker-20200724213753-14997:
-- stdout --
[
    {
        "CreatedAt": "2020-07-24T21:45:39Z",
        "Driver": "local",
        "Labels": {
            "created_by.minikube.sigs.k8s.io": "true",
            "name.minikube.sigs.k8s.io": "offline-docker-20200724213753-14997"
        },
        "Mountpoint": "/var/lib/docker/volumes/offline-docker-20200724213753-14997/_data",
        "Name": "offline-docker-20200724213753-14997",
        "Options": {},
        "Scope": "local"
    }
]
-- /stdout --
helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p offline-docker-20200724213753-14997 -n offline-docker-20200724213753-14997
helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p offline-docker-20200724213753-14997 -n offline-docker-20200724213753-14997: exit status 7 (93.550189ms)
-- stdout --
Nonexistent
-- /stdout --
** stderr **
E0724 21:46:37.299663 18824 status.go:118] status error: host: state: unknown state "offline-docker-20200724213753-14997": docker container inspect offline-docker-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:
stderr:
Error: No such container: offline-docker-20200724213753-14997
** /stderr **
helpers_test.go:232: status error: exit status 7 (may be ok)
helpers_test.go:234: "offline-docker-20200724213753-14997" host is not running, skipping log retrieval (state="Nonexistent")
helpers_test.go:170: Cleaning up "offline-docker-20200724213753-14997" profile ...
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p offline-docker-20200724213753-14997
=== CONT TestOffline/group/crio
aab_offline_test.go:53: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p offline-crio-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime crio --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 70 (9m9.629075524s)
-- stdout --
* [offline-crio-20200724213753-14997] minikube v1.12.1 on Ubuntu 20.04
  - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
  - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome
* Using the docker driver based on user configuration
* Starting control plane node offline-crio-20200724213753-14997 in cluster offline-crio-20200724213753-14997
* Pulling base image ...
* Creating docker container (CPUs=2, Memory=2000MB) ...
* docker "offline-crio-20200724213753-14997" container is missing, will recreate.
* Creating docker container (CPUs=2, Memory=2000MB) ...
-- /stdout -- ** stderr ** I0724 21:37:53.897685 17333 out.go:188] Setting JSON to false I0724 21:37:53.900042 17333 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":312,"bootTime":1595626361,"procs":334,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 21:37:53.901020 17333 start.go:111] virtualization: kvm host I0724 21:37:53.915117 17333 notify.go:125] Checking for updates... I0724 21:37:53.915505 17333 driver.go:287] Setting default libvirt URI to qemu:///system I0724 21:37:53.966534 17333 docker.go:87] docker version: linux-19.03.8 I0724 21:37:53.973726 17333 start.go:217] selected driver: docker I0724 21:37:53.973753 17333 start.go:623] validating driver "docker" against I0724 21:37:53.973808 17333 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 21:37:53.973893 17333 cli_runner.go:109] Run: docker system info --format "{{json .}}" I0724 21:37:54.032285 17333 start_flags.go:223] no existing cluster config was found, will generate one from the flags I0724 21:37:54.032542 17333 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 21:37:54.032565 17333 cni.go:74] Creating CNI manager for "" I0724 21:37:54.032570 17333 cni.go:105] "docker" driver + crio runtime found, recommending kindnet I0724 21:37:54.032580 17333 start_flags.go:340] Found "CNI" CNI - setting NetworkPlugin=cni I0724 21:37:54.032592 17333 start_flags.go:345] config: {Name:offline-crio-20200724213753-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:offline-crio-20200724213753-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 21:37:54.097112 17333 cache.go:117] Beginning downloading kic base image for docker with crio I0724 21:37:54.104147 17333 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime crio I0724 21:37:54.104222 17333 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-cri-o-overlay-amd64.tar.lz4 I0724 21:37:54.104245 17333 cache.go:51] Caching tarball of preloaded images I0724 21:37:54.104255 17333 preload.go:131] Found 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-cri-o-overlay-amd64.tar.lz4 in cache, skipping download I0724 21:37:54.104264 17333 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on crio I0724 21:37:54.104410 17333 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 21:37:54.104432 17333 image.go:140] Writing local/kicbase:-snapshot to local daemon I0724 21:37:54.104568 17333 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/offline-crio-20200724213753-14997/config.json ... I0724 21:37:54.104637 17333 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/offline-crio-20200724213753-14997/config.json: {Name:mk2f5a9abd8a41492c2196467e9175bcfd60fdbe Clock:{} Delay:500ms Timeout:1m0s Cancel:} W0724 21:38:23.916126 17333 notify.go:56] Error getting json from minikube version url: error with http GET for endpoint https://storage.googleapis.com/minikube/releases.json: Get "https://storage.googleapis.com/minikube/releases.json": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout I0724 21:40:28.320065 17333 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: Get "https://index.docker.io/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout I0724 21:40:28.320140 17333 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 21:40:28.320157 17333 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 21:43:02.396319 17333 cache.go:151] failed to download kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438, will try fallback image if available: getting remote image: Get "https://index.docker.io/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout I0724 21:43:02.396403 17333 cache.go:137] Downloading docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10 to local daemon I0724 21:43:02.396416 17333 image.go:140] Writing docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10 to local daemon I0724 21:45:36.675479 17333 cache.go:151] failed to download docker.pkg.github.com/kubernetes/minikube/kicbase:v0.0.10, will try fallback image if available: getting remote image: Get "https://docker.pkg.github.com/v2/": proxyconnect tcp: dial tcp 172.16.1.1:1: i/o timeout E0724 21:45:36.675535 17333 cache.go:172] Error downloading kic artifacts: failed to download kic base image or any fallback image I0724 21:45:36.675683 17333 cache.go:178] Successfully downloaded all kic artifacts I0724 21:45:36.675728 17333 start.go:241] acquiring machines lock for offline-crio-20200724213753-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 21:45:41.591685 17333 start.go:245] acquired machines lock for "offline-crio-20200724213753-14997" in 4.915928482s I0724 21:45:41.591749 17333 start.go:85] Provisioning new machine with config: &{Name:offline-crio-20200724213753-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 
HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:offline-crio-20200724213753-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true} I0724 21:45:41.591839 17333 start.go:122] createHost starting for "" (driver="docker") I0724 21:45:41.618036 17333 start.go:158] libmachine.API.Create for "offline-crio-20200724213753-14997" (driver="docker") I0724 21:45:41.618086 17333 client.go:161] LocalClient.Create starting I0724 21:45:41.618136 17333 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 21:45:41.618186 17333 main.go:115] libmachine: Decoding PEM data... I0724 21:45:41.618216 17333 main.go:115] libmachine: Parsing certificate... I0724 21:45:41.618388 17333 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 21:45:41.618423 17333 main.go:115] libmachine: Decoding PEM data... I0724 21:45:41.618439 17333 main.go:115] libmachine: Parsing certificate... I0724 21:45:41.618931 17333 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 21:45:41.662958 17333 cli_runner.go:109] Run: docker volume create offline-crio-20200724213753-14997 --label name.minikube.sigs.k8s.io=offline-crio-20200724213753-14997 --label created_by.minikube.sigs.k8s.io=true I0724 21:45:41.713647 17333 oci.go:101] Successfully created a docker volume offline-crio-20200724213753-14997 I0724 21:45:41.713728 17333 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib I0724 21:45:41.756924 17333 client.go:164] LocalClient.Create took 138.819157ms I0724 21:45:43.757172 17333 start.go:125] duration metric: createHost completed in 2.165307738s I0724 21:45:43.757236 17333 start.go:76] releasing machines lock for "offline-crio-20200724213753-14997", held for 2.165512052s I0724 21:45:43.758100 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:45:43.804534 17333 delete.go:81] Unable to get host status for offline-crio-20200724213753-14997, assuming it has already been deleted: state: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 ! 
StartHost failed, but will try again: creating host: create: creating: setting up container node: preparing volume for offline-crio-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125 stdout: stderr: docker: invalid reference format. See 'docker run --help'. I0724 21:45:48.804961 17333 start.go:241] acquiring machines lock for offline-crio-20200724213753-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 21:46:37.152706 17333 start.go:245] acquired machines lock for "offline-crio-20200724213753-14997" in 48.347659899s I0724 21:46:37.152770 17333 start.go:89] Skipping create...Using existing machine configuration I0724 21:46:37.152780 17333 fix.go:53] fixHost starting: I0724 21:46:37.153215 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:37.201794 17333 fix.go:105] recreateIfNeeded on offline-crio-20200724213753-14997: state= err=unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:37.201841 17333 fix.go:110] machineExists: false. err=machine does not exist I0724 21:46:37.207913 17333 delete.go:123] DEMOLISHING offline-crio-20200724213753-14997 ... I0724 21:46:37.208003 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} W0724 21:46:37.252163 17333 stop.go:72] unable to get state: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:37.252197 17333 delete.go:128] stophost failed (probably ok): ssh power off: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:37.252651 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:37.297294 17333 delete.go:81] Unable to get host status for offline-crio-20200724213753-14997, assuming it has already been deleted: state: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:37.297371 17333 cli_runner.go:109] Run: docker container inspect -f {{.Id}} offline-crio-20200724213753-14997 I0724 21:46:37.340889 17333 kic.go:274] could not find the container offline-crio-20200724213753-14997 to remove it. 
will try anyways I0724 21:46:37.340989 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} W0724 21:46:37.387829 17333 oci.go:82] error getting container status, will try to delete anyways: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:37.387891 17333 cli_runner.go:109] Run: docker exec --privileged -t offline-crio-20200724213753-14997 /bin/bash -c "sudo init 0" I0724 21:46:37.433882 17333 oci.go:568] error shutdown offline-crio-20200724213753-14997: docker exec --privileged -t offline-crio-20200724213753-14997 /bin/bash -c "sudo init 0": exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:38.434147 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:38.477386 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:38.477425 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:38.477462 17333 retry.go:30] will retry after 357.131936ms: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:38.834995 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:38.879081 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:38.879102 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:38.879120 17333 retry.go:30] will retry after 660.492892ms: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:39.539958 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:39.589370 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:39.589407 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:39.589469 17333 retry.go:30] will retry after 920.315446ms: couldn't verify cointainer is exited. 
%v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:40.510218 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:40.557297 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:40.557328 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:40.557351 17333 retry.go:30] will retry after 1.635001613s: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:42.192726 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:42.237183 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:42.237222 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:42.237241 17333 retry.go:30] will retry after 1.982055195s: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:44.219663 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:44.264659 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:44.264680 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:44.264698 17333 retry.go:30] will retry after 3.011308614s: couldn't verify cointainer is exited. 
%v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:47.276540 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:47.321417 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:47.321452 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:47.321469 17333 retry.go:30] will retry after 6.715255694s: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:54.037038 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:46:54.085448 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:46:54.085474 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:46:54.085499 17333 retry.go:30] will retry after 6.138576273s: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:47:00.224468 17333 cli_runner.go:109] Run: docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}} I0724 21:47:00.270753 17333 oci.go:580] temporary error verifying shutdown: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:47:00.270787 17333 oci.go:582] temporary error: container offline-crio-20200724213753-14997 status is but expect it to be exited I0724 21:47:00.270824 17333 oci.go:86] couldn't shut down offline-crio-20200724213753-14997 (might be okay): verify shutdown: couldn't verify cointainer is exited. %v: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1 stdout: stderr: Error: No such container: offline-crio-20200724213753-14997 I0724 21:47:00.270892 17333 cli_runner.go:109] Run: docker rm -f -v offline-crio-20200724213753-14997 W0724 21:47:00.313155 17333 delete.go:138] delete failed (probably ok) I0724 21:47:00.313193 17333 fix.go:117] Sleeping 1 second for extra luck! 
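Note: the delays in the "will retry after ..." lines above (357ms, 660ms, 920ms, 1.6s, 2s, 3s, 6.7s, 6.1s) follow a jittered, roughly exponential backoff around the shutdown check; after about 23 seconds the code gives up ("couldn't shut down ... (might be okay)") and falls through to "docker rm -f -v", whose failure is likewise tolerated. Two cosmetic bugs sit in the logged message itself, not in this transcript: the literal "%v:" is a format verb that was never interpolated, and "cointainer" is a typo in the source string. A minimal sketch of the backoff pattern, with hypothetical names rather than minikube's actual retry.go:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff calls check until it succeeds or attempts run out,
// sleeping an exponentially growing, jittered duration between tries.
func retryBackoff(attempts int, base time.Duration, check func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = check(); err == nil {
			return nil
		}
		d := base << uint(i)                      // double the delay each round
		d += time.Duration(rand.Int63n(int64(d))) // jitter yields the uneven sequence above
		fmt.Printf("will retry after %v: %v\n", d, err)
		time.Sleep(d)
	}
	return err
}

func main() {
	_ = retryBackoff(5, 300*time.Millisecond, func() error {
		return fmt.Errorf("unknown state") // always fails, to demonstrate the delays
	})
}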
I0724 21:47:01.313335 17333 start.go:122] createHost starting for "" (driver="docker") I0724 21:47:01.335431 17333 start.go:158] libmachine.API.Create for "offline-crio-20200724213753-14997" (driver="docker") I0724 21:47:01.335468 17333 client.go:161] LocalClient.Create starting I0724 21:47:01.335512 17333 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 21:47:01.335557 17333 main.go:115] libmachine: Decoding PEM data... I0724 21:47:01.335588 17333 main.go:115] libmachine: Parsing certificate... I0724 21:47:01.335732 17333 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 21:47:01.335764 17333 main.go:115] libmachine: Decoding PEM data... I0724 21:47:01.335782 17333 main.go:115] libmachine: Parsing certificate... I0724 21:47:01.336081 17333 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 21:47:01.381685 17333 cli_runner.go:109] Run: docker volume create offline-crio-20200724213753-14997 --label name.minikube.sigs.k8s.io=offline-crio-20200724213753-14997 --label created_by.minikube.sigs.k8s.io=true I0724 21:47:01.425007 17333 oci.go:101] Successfully created a docker volume offline-crio-20200724213753-14997 I0724 21:47:01.425094 17333 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib I0724 21:47:01.466256 17333 client.go:164] LocalClient.Create took 130.777537ms I0724 21:47:03.466549 17333 start.go:125] duration metric: createHost completed in 2.153152392s I0724 21:47:03.466603 17333 fix.go:55] fixHost completed within 26.313823546s I0724 21:47:03.466614 17333 start.go:76] releasing machines lock for "offline-crio-20200724213753-14997", held for 26.313865549s * Failed to start docker container. "minikube start -p offline-crio-20200724213753-14997" may fix it: recreate: creating host: create: creating: setting up container node: preparing volume for offline-crio-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125 stdout: stderr: docker: invalid reference format. See 'docker run --help'. I0724 21:47:03.466870 17333 exit.go:58] WithError(error provisioning host)=Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-crio-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125 stdout: stderr: docker: invalid reference format. See 'docker run --help'. 
called from:
goroutine 1 [running]:
runtime/debug.Stack(0x0, 0x0, 0x100000000000000)
    /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d
k8s.io/minikube/pkg/minikube/exit.WithError(0x1bad79a, 0x17, 0x1ebf200, 0xc0003cf9c0)
    /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34
k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc0003fe840, 0x2, 0xc)
    /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:171 +0xbe2
github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc0003fe780, 0xc, 0xc, 0x2cd0820, 0xc0003fe780)
    /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc000042870)
    /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349
github.com/spf13/cobra.(*Command).Execute(...)
    /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887
k8s.io/minikube/cmd/minikube/cmd.Execute()
    /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c
main.main()
    /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f
W0724 21:47:03.467301 17333 out.go:249] error provisioning host: Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-crio-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:
stderr:
docker: invalid reference format.
See 'docker run --help'.
* X error provisioning host: Failed to start host: recreate: creating host: create: creating: setting up container node: preparing volume for offline-crio-20200724213753-14997 container: docker run --rm --entrypoint /usr/bin/test -v offline-crio-20200724213753-14997:/var local/kicbase:-snapshot -d /var/lib: exit status 125
stdout:
stderr:
docker: invalid reference format.
See 'docker run --help'.
*
* minikube is exiting due to an error.
If the above message is not useful, open an issue:
- https://github.com/kubernetes/minikube/issues/new/choose
** /stderr **
aab_offline_test.go:56: ./minikube-linux-amd64 start -p offline-crio-20200724213753-14997 --alsologtostderr -v=1 --memory=2000 --wait=true --container-runtime crio --vm-driver=docker --base-image=local/kicbase:-snapshot failed: exit status 70
panic.go:617: *** TestOffline/group/crio FAILED at 2020-07-24 21:47:03.473165151 +0000 UTC m=+653.597529636
helpers_test.go:215: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestOffline/group/crio]: docker inspect <======
helpers_test.go:224: (dbg) Run: docker inspect offline-crio-20200724213753-14997
helpers_test.go:228: (dbg) docker inspect offline-crio-20200724213753-14997:
-- stdout --
[
    {
        "CreatedAt": "2020-07-24T21:45:41Z",
        "Driver": "local",
        "Labels": {
            "created_by.minikube.sigs.k8s.io": "true",
            "name.minikube.sigs.k8s.io": "offline-crio-20200724213753-14997"
        },
        "Mountpoint": "/var/lib/docker/volumes/offline-crio-20200724213753-14997/_data",
        "Name": "offline-crio-20200724213753-14997",
        "Options": {},
        "Scope": "local"
    }
]
-- /stdout --
helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p offline-crio-20200724213753-14997 -n offline-crio-20200724213753-14997
helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p offline-crio-20200724213753-14997 -n offline-crio-20200724213753-14997: exit status 7 (106.143235ms)
-- stdout --
Nonexistent
-- /stdout --
** stderr **
E0724 21:47:03.624656 19257 status.go:118] status error: host: state: unknown state "offline-crio-20200724213753-14997": docker container inspect offline-crio-20200724213753-14997 --format={{.State.Status}}: exit status 1
stdout:
stderr:
Error: No such container: offline-crio-20200724213753-14997
** /stderr **
helpers_test.go:232: status error: exit status 7 (may be ok)
helpers_test.go:234: "offline-crio-20200724213753-14997" host is not running, skipping log retrieval (state="Nonexistent")
helpers_test.go:170: Cleaning up "offline-crio-20200724213753-14997" profile ...
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p offline-crio-20200724213753-14997
--- FAIL: TestOffline (550.15s)
    --- FAIL: TestOffline/group (0.00s)
        --- FAIL: TestOffline/group/containerd (497.49s)
        --- FAIL: TestOffline/group/docker (523.82s)
        --- FAIL: TestOffline/group/crio (550.15s)
=== RUN TestAddons
addons_test.go:44: (dbg) Run: ./minikube-linux-amd64 start -p addons-20200724214703-14997 --wait=false --memory=2600 --alsologtostderr --addons=ingress --addons=registry --addons=metrics-server --addons=helm-tiller --addons=olm --vm-driver=docker --base-image=local/kicbase:-snapshot
addons_test.go:44: (dbg) Done: ./minikube-linux-amd64 start -p addons-20200724214703-14997 --wait=false --memory=2600 --alsologtostderr --addons=ingress --addons=registry --addons=metrics-server --addons=helm-tiller --addons=olm --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m37.585313079s)
=== RUN TestAddons/parallel
=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== RUN TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer
=== RUN TestAddons/parallel/HelmTiller
=== PAUSE TestAddons/parallel/HelmTiller
=== RUN TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm
=== CONT TestAddons/parallel/Registry
=== CONT TestAddons/parallel/HelmTiller
=== CONT TestAddons/parallel/MetricsServer
=== CONT TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Olm
addons_test.go:334: Skipping olm test till this timeout issue is solved https://github.com/operator-framework/operator-lifecycle-manager/issues/1534#issuecomment-632342257
=== CONT TestAddons/parallel/Registry
addons_test.go:173: registry stabilized in 18.030153ms
=== CONT TestAddons/parallel/HelmTiller
addons_test.go:293: tiller-deploy stabilized in 18.254268ms
=== CONT TestAddons/parallel/MetricsServer
addons_test.go:249: metrics-server stabilized in 18.99902ms
=== CONT TestAddons/parallel/Ingress
addons_test.go:100: (dbg) TestAddons/parallel/Ingress: waiting 12m0s for pods matching "app.kubernetes.io/name=ingress-nginx" in namespace "kube-system" ...
=== CONT TestAddons/parallel/Registry
addons_test.go:175: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
=== CONT TestAddons/parallel/HelmTiller
addons_test.go:295: (dbg) TestAddons/parallel/HelmTiller: waiting 6m0s for pods matching "app=helm" in namespace "kube-system" ...
=== CONT TestAddons/parallel/MetricsServer
addons_test.go:251: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
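Note: the "=== PAUSE" / "=== CONT" markers are stock Go testing behavior rather than anything minikube-specific: a subtest that calls t.Parallel() is paused when it is registered and continued together with its siblings once the parent's serial body returns, which is why Registry, HelmTiller, MetricsServer, Ingress and Olm all interleave below. A minimal sketch of the pattern (hypothetical test, not the actual addons_test.go):

package addons_test

import "testing"

func TestAddonsSketch(t *testing.T) {
	t.Run("parallel", func(t *testing.T) {
		for _, name := range []string{"Registry", "Ingress", "MetricsServer"} {
			name := name // capture the loop variable for the closure (pre-Go 1.22 idiom)
			t.Run(name, func(t *testing.T) {
				t.Parallel() // emits "=== PAUSE" here, "=== CONT" when resumed
				t.Log("exercising addon", name)
			})
		}
	})
}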
=== CONT TestAddons/parallel/HelmTiller helpers_test.go:332: "tiller-deploy-78ff886c54-vr5xx" [7f7045bb-0219-4cd8-8132-d1958d9f17e3] Running === CONT TestAddons/parallel/Registry helpers_test.go:332: "registry-wccps" [88134e94-6099-4ed9-8092-af8fd643712e] Running === CONT TestAddons/parallel/MetricsServer helpers_test.go:332: "metrics-server-7bc6d75975-qdzbm" [42a63c7d-8709-4b04-b30c-5df3dbbfb00a] Running === CONT TestAddons/parallel/Ingress helpers_test.go:332: "ingress-nginx-admission-create-bzgwm" [5f899f9f-ff19-492f-a717-e58163ba6756] Succeeded: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted addons_test.go:100: (dbg) TestAddons/parallel/Ingress: app.kubernetes.io/name=ingress-nginx healthy within 10.655341ms addons_test.go:105: (dbg) Run: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml addons_test.go:105: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml: exit status 1 (2.079468643s) ** stderr ** Error from server (InternalError): Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": Post https://ingress-nginx-controller-admission.kube-system.svc:443/extensions/v1beta1/ingresses?timeout=30s: dial tcp 10.103.182.227:443: connect: connection refused ** /stderr ** addons_test.go:105: (dbg) Run: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml addons_test.go:105: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml: exit status 1 (1.231441136s) ** stderr ** Error from server (InternalError): Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": Post https://ingress-nginx-controller-admission.kube-system.svc:443/extensions/v1beta1/ingresses?timeout=30s: dial tcp 10.103.182.227:443: connect: connection refused ** /stderr ** === CONT TestAddons/parallel/Registry addons_test.go:175: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.013800563s === CONT TestAddons/parallel/MetricsServer addons_test.go:251: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.013217723s addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system === CONT TestAddons/parallel/HelmTiller addons_test.go:295: (dbg) TestAddons/parallel/HelmTiller: app=helm healthy within 5.013636152s addons_test.go:310: (dbg) Run: kubectl --context addons-20200724214703-14997 run --rm helm-test --restart=Never --image=alpine/helm:2.16.3 -it --namespace=kube-system --serviceaccount=tiller -- version === CONT TestAddons/parallel/Registry addons_test.go:178: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ... 
helpers_test.go:332: "registry-proxy-4vjqj" [b9f36d12-8e87-4e69-a8fe-9ec4fa1f7c5f] Running === CONT TestAddons/parallel/MetricsServer addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (82.93676ms) ** stderr ** error: metrics not available yet ** /stderr ** === CONT TestAddons/parallel/Ingress addons_test.go:105: (dbg) Run: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml addons_test.go:105: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml: exit status 1 (1.230522273s) ** stderr ** Error from server (InternalError): Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": Post https://ingress-nginx-controller-admission.kube-system.svc:443/extensions/v1beta1/ingresses?timeout=30s: dial tcp 10.103.182.227:443: connect: connection refused ** /stderr ** === CONT TestAddons/parallel/MetricsServer addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (78.568458ms) ** stderr ** error: metrics not available yet ** /stderr ** === CONT TestAddons/parallel/HelmTiller addons_test.go:310: (dbg) Done: kubectl --context addons-20200724214703-14997 run --rm helm-test --restart=Never --image=alpine/helm:2.16.3 -it --namespace=kube-system --serviceaccount=tiller -- version: (4.277476815s) addons_test.go:327: (dbg) Run: ./minikube-linux-amd64 -p addons-20200724214703-14997 addons disable helm-tiller --alsologtostderr -v=1 === CONT TestAddons/parallel/Ingress addons_test.go:105: (dbg) Run: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml === CONT TestAddons/parallel/Registry addons_test.go:178: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.007438017s addons_test.go:183: (dbg) Run: kubectl --context addons-20200724214703-14997 delete po -l run=registry-test --now addons_test.go:188: (dbg) Run: kubectl --context addons-20200724214703-14997 run --rm registry-test --restart=Never --image=busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local" === CONT TestAddons/parallel/Ingress addons_test.go:105: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml: exit status 1 (1.216556s) ** stderr ** Error from server (InternalError): Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": Post https://ingress-nginx-controller-admission.kube-system.svc:443/extensions/v1beta1/ingresses?timeout=30s: dial tcp 10.103.182.227:443: connect: connection refused ** /stderr ** === CONT TestAddons/parallel/MetricsServer addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (87.461975ms) ** stderr ** error: metrics not available yet ** /stderr ** === CONT TestAddons/parallel/Registry addons_test.go:188: (dbg) Done: kubectl --context addons-20200724214703-14997 run --rm registry-test --restart=Never --image=busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (3.651834552s) addons_test.go:202: (dbg) Run: ./minikube-linux-amd64 -p addons-20200724214703-14997 ip 2020/07/24 
21:48:55 [DEBUG] GET http://172.17.0.3:5000 addons_test.go:231: (dbg) Run: ./minikube-linux-amd64 -p addons-20200724214703-14997 addons disable registry --alsologtostderr -v=1 === CONT TestAddons/parallel/Ingress addons_test.go:105: (dbg) Run: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-ing.yaml addons_test.go:119: (dbg) Run: kubectl --context addons-20200724214703-14997 replace --force -f testdata/nginx-pod-svc.yaml addons_test.go:124: (dbg) TestAddons/parallel/Ingress: waiting 4m0s for pods matching "run=nginx" in namespace "default" ... helpers_test.go:332: "nginx" [a9de9503-4f48-4baf-b735-f9c2abdfe4d8] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx]) === CONT TestAddons/parallel/MetricsServer addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (77.03655ms) ** stderr ** error: metrics not available yet ** /stderr ** === CONT TestAddons/parallel/Ingress helpers_test.go:332: "nginx" [a9de9503-4f48-4baf-b735-f9c2abdfe4d8] Running === CONT TestAddons/parallel/MetricsServer addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (76.357604ms) ** stderr ** error: metrics not available yet ** /stderr ** === CONT TestAddons/parallel/Ingress addons_test.go:124: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 11.006331476s addons_test.go:134: (dbg) Run: ./minikube-linux-amd64 -p addons-20200724214703-14997 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'" addons_test.go:155: (dbg) Run: ./minikube-linux-amd64 -p addons-20200724214703-14997 addons disable ingress --alsologtostderr -v=1 addons_test.go:155: (dbg) Done: ./minikube-linux-amd64 -p addons-20200724214703-14997 addons disable ingress --alsologtostderr -v=1: (2.18854591s) === CONT TestAddons/parallel/MetricsServer addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (77.135557ms) ** stderr ** error: metrics not available yet ** /stderr ** addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:257: (dbg) Non-zero exit: kubectl --context addons-20200724214703-14997 top pods -n kube-system: exit status 1 (76.398606ms) ** stderr ** error: metrics not available yet ** /stderr ** addons_test.go:257: (dbg) Run: kubectl --context addons-20200724214703-14997 top pods -n kube-system addons_test.go:275: (dbg) Run: ./minikube-linux-amd64 -p addons-20200724214703-14997 addons disable metrics-server --alsologtostderr -v=1 === CONT TestAddons addons_test.go:71: (dbg) Run: ./minikube-linux-amd64 stop -p addons-20200724214703-14997 addons_test.go:71: (dbg) Done: ./minikube-linux-amd64 stop -p addons-20200724214703-14997: (6.308772998s) addons_test.go:75: (dbg) Run: ./minikube-linux-amd64 addons enable dashboard -p addons-20200724214703-14997 addons_test.go:79: (dbg) Run: ./minikube-linux-amd64 addons disable dashboard -p addons-20200724214703-14997 helpers_test.go:170: Cleaning up "addons-20200724214703-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p addons-20200724214703-14997
helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p addons-20200724214703-14997: (4.801052873s)
--- PASS: TestAddons (195.22s)
    --- PASS: TestAddons/parallel (0.00s)
        --- SKIP: TestAddons/parallel/Olm (0.00s)
        --- PASS: TestAddons/parallel/HelmTiller (9.91s)
        --- PASS: TestAddons/parallel/Registry (14.43s)
        --- PASS: TestAddons/parallel/Ingress (29.24s)
        --- PASS: TestAddons/parallel/MetricsServer (86.29s)
=== RUN TestCertOptions
=== PAUSE TestCertOptions
=== RUN TestDockerFlags
=== PAUSE TestDockerFlags
=== RUN TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag
=== RUN TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv
=== RUN TestKVMDriverInstallOrUpdate
=== PAUSE TestKVMDriverInstallOrUpdate
=== RUN TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:102: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)
=== RUN TestErrorSpam
=== PAUSE TestErrorSpam
=== RUN TestFunctional
=== RUN TestFunctional/serial
=== RUN TestFunctional/serial/CopySyncFile
functional_test.go:903: local sync path: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts
=== RUN TestFunctional/serial/StartWithProxy
functional_test.go:220: (dbg) Run: ./minikube-linux-amd64 start -p functional-20200724215019-14997 --memory=2800 --apiserver-port=8441 --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot
functional_test.go:220: (dbg) Done: ./minikube-linux-amd64 start -p functional-20200724215019-14997 --memory=2800 --apiserver-port=8441 --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot: (52.105865611s)
=== RUN TestFunctional/serial/SoftStart
functional_test.go:252: (dbg) Run: ./minikube-linux-amd64 start -p functional-20200724215019-14997
functional_test.go:252: (dbg) Done: ./minikube-linux-amd64 start -p functional-20200724215019-14997: (3.469187404s)
functional_test.go:256: soft start took 3.469655737s for "functional-20200724215019-14997" cluster.
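Note: the gap between StartWithProxy (52.1s) and SoftStart (3.5s) is the point of the assertion: a "soft start" re-runs minikube start against a profile whose container already exists, so provisioning and image pulls are skipped and only reconfiguration happens. The measurement itself is a wall-clock timing around the command; a minimal sketch under that assumption (hypothetical harness code, not the actual functional_test.go):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	start := time.Now()
	// re-running start on an existing profile exercises the "soft start" path
	cmd := exec.Command("./minikube-linux-amd64", "start", "-p", "functional-20200724215019-14997")
	if err := cmd.Run(); err != nil {
		fmt.Println("start failed:", err)
		return
	}
	fmt.Printf("soft start took %s\n", time.Since(start))
}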
=== RUN TestFunctional/serial/KubeContext functional_test.go:273: (dbg) Run: kubectl config current-context === RUN TestFunctional/serial/KubectlGetPods functional_test.go:286: (dbg) Run: kubectl --context functional-20200724215019-14997 get po -A === RUN TestFunctional/serial/CacheCmd === RUN TestFunctional/serial/CacheCmd/cache === RUN TestFunctional/serial/CacheCmd/cache/add functional_test.go:457: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache add busybox:latest functional_test.go:457: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache add busybox:latest: (1.96956084s) functional_test.go:457: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache add busybox:1.28.4-glibc functional_test.go:457: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache add busybox:1.28.4-glibc: (1.800805976s) functional_test.go:457: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache add k8s.gcr.io/pause:latest functional_test.go:457: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache add k8s.gcr.io/pause:latest: (1.241789335s) === RUN TestFunctional/serial/CacheCmd/cache/delete_busybox:1.28.4-glibc functional_test.go:464: (dbg) Run: ./minikube-linux-amd64 cache delete busybox:1.28.4-glibc === RUN TestFunctional/serial/CacheCmd/cache/list functional_test.go:471: (dbg) Run: ./minikube-linux-amd64 cache list === RUN TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node functional_test.go:484: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh sudo crictl images === RUN TestFunctional/serial/CacheCmd/cache/cache_reload functional_test.go:497: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh sudo docker rmi busybox:latest functional_test.go:503: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh sudo crictl inspecti busybox:latest functional_test.go:503: (dbg) Non-zero exit: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh sudo crictl inspecti busybox:latest: exit status 1 (294.606164ms) -- stdout -- FATA[0000] no such image "busybox:latest" present -- /stdout -- ** stderr ** ssh: Process exited with status 1 ** /stderr ** functional_test.go:508: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache reload functional_test.go:508: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 cache reload: (1.181756486s) functional_test.go:513: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh sudo crictl inspecti busybox:latest === RUN TestFunctional/serial/CacheCmd/cache/delete functional_test.go:522: (dbg) Run: ./minikube-linux-amd64 cache delete busybox:latest functional_test.go:522: (dbg) Run: ./minikube-linux-amd64 cache delete k8s.gcr.io/pause:latest === RUN TestFunctional/serial/MinikubeKubectlCmd functional_test.go:304: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 kubectl -- --context functional-20200724215019-14997 get pods === PAUSE TestFunctional === RUN TestGvisorAddon gvisor_addon_test.go:33: skipping test because --gvisor=false --- SKIP: TestGvisorAddon (0.00s) === RUN TestJSONOutput json_output_test.go:42: (dbg) Run: ./minikube-linux-amd64 start -p json-output-20200724215123-14997 --memory=2200 --output=json --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot json_output_test.go:42: (dbg) Done: ./minikube-linux-amd64 start -p json-output-20200724215123-14997 --memory=2200 
--output=json --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot: (54.612827329s) === RUN TestJSONOutput/serial === RUN TestJSONOutput/serial/DistinctCurrentSteps json_output_test.go:78: step 3 has already been assigned to another step: Starting control plane node json-output-20200724215123-14997 in cluster json-output-20200724215123-14997 Cannot use for: Pulling base image ... [Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: e4c1c62e-f58a-4157-a35f-3f45867f4c02 datacontenttype: application/json Data, { "currentstep": "0", "message": "[json-output-20200724215123-14997] minikube v1.12.1 on Ubuntu 20.04\n", "name": "Initial Minikube Setup", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.info source: https://minikube.sigs.k8s.io/ id: 78482adb-b788-4837-bf5d-56dc352cadaf datacontenttype: application/json Data, { "message": "KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig\n" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.info source: https://minikube.sigs.k8s.io/ id: c1099761-3b13-44f7-b35b-0705a3c93781 datacontenttype: application/json Data, { "message": "MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome\n" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 4658e336-b807-4862-832f-7a441ffc7606 datacontenttype: application/json Data, { "currentstep": "1", "message": "Using the docker driver based on user configuration\n", "name": "Selecting Driver", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 1366dfca-2063-4d82-9d39-1e9f00a7aa5b datacontenttype: application/json Data, { "currentstep": "3", "message": "Starting control plane node json-output-20200724215123-14997 in cluster json-output-20200724215123-14997\n", "name": "Starting Node", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 76c4601a-d072-4de0-b799-92e6f9b13abe datacontenttype: application/json Data, { "currentstep": "3", "message": "Pulling base image ...\n", "name": "Starting Node", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.warning source: https://minikube.sigs.k8s.io/ id: 594ef999-fc71-40c5-b697-0af301696559 datacontenttype: application/json Data, { "message": "minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image\n" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 31d82c12-2c4e-49c8-8024-ca1252ba8c5a datacontenttype: application/json Data, { "currentstep": "6", "message": "Creating docker container (CPUs=2, Memory=2200MB) ...\n", "name": "Creating Container", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: c223bcaa-d4a6-40d1-b650-53da08525676 datacontenttype: application/json Data, { "currentstep": "8", "message": "Preparing Kubernetes v1.18.3 on Docker 19.03.2 ...\n", "name": "Preparing Kubernetes", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: 
io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 594866fd-78bc-4d0f-ba41-ef4296dcf2a7 datacontenttype: application/json Data, { "currentstep": "10", "message": "Verifying Kubernetes components...\n", "name": "Verifying Kubernetes", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 9ccac991-522c-43b3-819f-c37281701498 datacontenttype: application/json Data, { "currentstep": "11", "message": "Enabled addons: default-storageclass, storage-provisioner\n", "name": "Enabling Addons", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 2ad1b902-4b58-4e1f-b612-0bcad169f7bd datacontenttype: application/json Data, { "currentstep": "12", "message": "Done! kubectl is now configured to use \"json-output-20200724215123-14997\"\n", "name": "Done", "totalsteps": "12" } ] === RUN TestJSONOutput/serial/IncreasingCurrentSteps json_output_test.go:97: current step is not in increasing order: [Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: e4c1c62e-f58a-4157-a35f-3f45867f4c02 datacontenttype: application/json Data, { "currentstep": "0", "message": "[json-output-20200724215123-14997] minikube v1.12.1 on Ubuntu 20.04\n", "name": "Initial Minikube Setup", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.info source: https://minikube.sigs.k8s.io/ id: 78482adb-b788-4837-bf5d-56dc352cadaf datacontenttype: application/json Data, { "message": "KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig\n" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.info source: https://minikube.sigs.k8s.io/ id: c1099761-3b13-44f7-b35b-0705a3c93781 datacontenttype: application/json Data, { "message": "MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome\n" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 4658e336-b807-4862-832f-7a441ffc7606 datacontenttype: application/json Data, { "currentstep": "1", "message": "Using the docker driver based on user configuration\n", "name": "Selecting Driver", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 1366dfca-2063-4d82-9d39-1e9f00a7aa5b datacontenttype: application/json Data, { "currentstep": "3", "message": "Starting control plane node json-output-20200724215123-14997 in cluster json-output-20200724215123-14997\n", "name": "Starting Node", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 76c4601a-d072-4de0-b799-92e6f9b13abe datacontenttype: application/json Data, { "currentstep": "3", "message": "Pulling base image ...\n", "name": "Starting Node", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.warning source: https://minikube.sigs.k8s.io/ id: 594ef999-fc71-40c5-b697-0af301696559 datacontenttype: application/json Data, { "message": "minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image\n" } Validation: valid Context Attributes, specversion: 1.0 
type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 31d82c12-2c4e-49c8-8024-ca1252ba8c5a datacontenttype: application/json Data, { "currentstep": "6", "message": "Creating docker container (CPUs=2, Memory=2200MB) ...\n", "name": "Creating Container", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: c223bcaa-d4a6-40d1-b650-53da08525676 datacontenttype: application/json Data, { "currentstep": "8", "message": "Preparing Kubernetes v1.18.3 on Docker 19.03.2 ...\n", "name": "Preparing Kubernetes", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 594866fd-78bc-4d0f-ba41-ef4296dcf2a7 datacontenttype: application/json Data, { "currentstep": "10", "message": "Verifying Kubernetes components...\n", "name": "Verifying Kubernetes", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 9ccac991-522c-43b3-819f-c37281701498 datacontenttype: application/json Data, { "currentstep": "11", "message": "Enabled addons: default-storageclass, storage-provisioner\n", "name": "Enabling Addons", "totalsteps": "12" } Validation: valid Context Attributes, specversion: 1.0 type: io.k8s.sigs.minikube.step source: https://minikube.sigs.k8s.io/ id: 2ad1b902-4b58-4e1f-b612-0bcad169f7bd datacontenttype: application/json Data, { "currentstep": "12", "message": "Done! kubectl is now configured to use \"json-output-20200724215123-14997\"\n", "name": "Done", "totalsteps": "12" } ] === CONT TestJSONOutput helpers_test.go:170: Cleaning up "json-output-20200724215123-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p json-output-20200724215123-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p json-output-20200724215123-14997: (3.55879557s) --- FAIL: TestJSONOutput (58.17s) --- FAIL: TestJSONOutput/serial (0.00s) --- FAIL: TestJSONOutput/serial/DistinctCurrentSteps (0.00s) --- FAIL: TestJSONOutput/serial/IncreasingCurrentSteps (0.00s) === RUN TestJSONOutputError json_output_test.go:112: (dbg) Run: ./minikube-linux-amd64 start -p json-output-error-20200724215221-14997 --memory=2200 --output=json --wait=true --driver=fail json_output_test.go:112: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p json-output-error-20200724215221-14997 --memory=2200 --output=json --wait=true --driver=fail: exit status 69 (60.647936ms) -- stdout -- {"data":{"currentstep":"0","message":"[json-output-error-20200724215221-14997] minikube v1.12.1 on Ubuntu 20.04\n","name":"Initial Minikube Setup","totalsteps":"12"},"datacontenttype":"application/json","id":"d5ef6cec-67f7-4c59-b638-f3583c0c23dc","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.step"} {"data":{"message":"KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig\n"},"datacontenttype":"application/json","id":"a45c1cf2-007c-42f3-890a-3c205c974818","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.info"} {"data":{"message":"MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome\n"},"datacontenttype":"application/json","id":"7e03485e-21e6-4470-a112-cd747b912919","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.info"} {"data":{"exitcode":"69","message":"The driver 'fail' is not supported on linux\n"},"datacontenttype":"application/json","id":"800840fc-22d1-43a4-ae69-c93c891d5ad8","source":"https://minikube.sigs.k8s.io/","specversion":"1.0","type":"io.k8s.sigs.minikube.error"} -- /stdout -- helpers_test.go:170: Cleaning up "json-output-error-20200724215221-14997" profile ... 
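The two JSONOutput failures above both trace to the same event stream: "Starting Node" emits two events with currentstep 3 (control-plane start and base-image pull), so the steps are neither distinct nor strictly increasing, while TestJSONOutputError passes because the error path emits a single well-formed io.k8s.sigs.minikube.error event. Below is a minimal sketch of that kind of check, assuming only the event shape dumped above; it is an illustration, not the test's actual code. Pipe the --output=json stream into stdin.

// check_steps.go - a minimal sketch (not part of the minikube repo) of what the
// failing subtests assert: every io.k8s.sigs.minikube.step event should carry a
// distinct, strictly increasing "currentstep". Field names come from the events above.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strconv"
)

type cloudEvent struct {
	Type string `json:"type"`
	Data struct {
		CurrentStep string `json:"currentstep"`
		Message     string `json:"message"`
	} `json:"data"`
}

func main() {
	seen := map[int]string{} // step number -> first message that used it
	last := -1
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		var ev cloudEvent
		if err := json.Unmarshal(sc.Bytes(), &ev); err != nil || ev.Type != "io.k8s.sigs.minikube.step" {
			continue // skip info/warning events and any non-JSON output
		}
		n, err := strconv.Atoi(ev.Data.CurrentStep)
		if err != nil {
			continue
		}
		if msg, dup := seen[n]; dup {
			fmt.Printf("step %d has already been assigned to another step: %q cannot use for: %q\n", n, msg, ev.Data.Message)
		} else {
			seen[n] = ev.Data.Message
		}
		if n <= last {
			fmt.Printf("current step is not in increasing order: %d after %d\n", n, last)
		}
		last = n
	}
}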
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p json-output-error-20200724215221-14997 --- PASS: TestJSONOutputError (0.34s) === RUN TestMultiNode === RUN TestMultiNode/serial === RUN TestMultiNode/serial/FreshStart2Nodes multinode_test.go:66: (dbg) Run: ./minikube-linux-amd64 start -p multinode-20200724215222-14997 --wait=true --memory=2200 --nodes=2 --vm-driver=docker --base-image=local/kicbase:-snapshot multinode_test.go:66: (dbg) Done: ./minikube-linux-amd64 start -p multinode-20200724215222-14997 --wait=true --memory=2200 --nodes=2 --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m10.338142528s) multinode_test.go:72: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr === RUN TestMultiNode/serial/AddNode multinode_test.go:90: (dbg) Run: ./minikube-linux-amd64 node add -p multinode-20200724215222-14997 -v 3 --alsologtostderr multinode_test.go:90: (dbg) Done: ./minikube-linux-amd64 node add -p multinode-20200724215222-14997 -v 3 --alsologtostderr: (20.724063621s) multinode_test.go:96: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr === RUN TestMultiNode/serial/StopNode multinode_test.go:112: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 node stop m03 multinode_test.go:112: (dbg) Done: ./minikube-linux-amd64 -p multinode-20200724215222-14997 node stop m03: (1.390822201s) multinode_test.go:118: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status multinode_test.go:118: (dbg) Non-zero exit: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status: exit status 7 (627.595244ms) -- stdout -- multinode-20200724215222-14997 type: Control Plane host: Running kubelet: Running apiserver: Running kubeconfig: Configured multinode-20200724215222-14997-m02 type: Worker host: Running kubelet: Running multinode-20200724215222-14997-m03 type: Worker host: Stopped kubelet: Stopped -- /stdout -- multinode_test.go:125: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr multinode_test.go:125: (dbg) Non-zero exit: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr: exit status 7 (640.408336ms) -- stdout -- multinode-20200724215222-14997 type: Control Plane host: Running kubelet: Running apiserver: Running kubeconfig: Configured multinode-20200724215222-14997-m02 type: Worker host: Running kubelet: Running multinode-20200724215222-14997-m03 type: Worker host: Stopped kubelet: Stopped -- /stdout -- ** stderr ** I0724 21:53:56.639829 55088 mustload.go:64] Loading cluster: multinode-20200724215222-14997 I0724 21:53:56.640118 55088 status.go:124] checking status of multinode-20200724215222-14997 ... I0724 21:53:56.640564 55088 cli_runner.go:109] Run: docker container inspect multinode-20200724215222-14997 --format={{.State.Status}} I0724 21:53:56.690236 55088 status.go:188] multinode-20200724215222-14997 host status = "Running" (err=) I0724 21:53:56.690273 55088 host.go:65] Checking if "multinode-20200724215222-14997" exists ... I0724 21:53:56.690557 55088 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-20200724215222-14997 I0724 21:53:56.739895 55088 host.go:65] Checking if "multinode-20200724215222-14997" exists ... I0724 21:53:56.740166 55088 system_pods.go:161] Checking kubelet status ... 
I0724 21:53:56.740226 55088 ssh_runner.go:148] Run: systemctl --version I0724 21:53:56.740277 55088 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-20200724215222-14997 I0724 21:53:56.790917 55088 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/multinode-20200724215222-14997/id_rsa Username:docker} I0724 21:53:56.885382 55088 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service kubelet I0724 21:53:56.896277 55088 status.go:232] multinode-20200724215222-14997 kubelet status = Running I0724 21:53:56.897255 55088 kubeconfig.go:93] found "multinode-20200724215222-14997" server: "https://172.17.0.4:8443" I0724 21:53:56.897284 55088 api_server.go:146] Checking apiserver status ... I0724 21:53:56.897325 55088 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.* I0724 21:53:56.907644 55088 ssh_runner.go:148] Run: sudo egrep ^[0-9]+:freezer: /proc/1949/cgroup I0724 21:53:56.916148 55088 api_server.go:162] apiserver freezer: "9:freezer:/docker/d232f38f22df2f05e75477787bcdeddaaf58e35f3db0c2c5128918679eb2a1de/kubepods/burstable/pod8ea3eff5406575bb435cdaedf3d1c764/ad4c9ae8969ad15b57bec373dbd0156deac51a9471dea97caa7787d33ac27843" I0724 21:53:56.916226 55088 ssh_runner.go:148] Run: sudo cat /sys/fs/cgroup/freezer/docker/d232f38f22df2f05e75477787bcdeddaaf58e35f3db0c2c5128918679eb2a1de/kubepods/burstable/pod8ea3eff5406575bb435cdaedf3d1c764/ad4c9ae8969ad15b57bec373dbd0156deac51a9471dea97caa7787d33ac27843/freezer.state I0724 21:53:56.924251 55088 api_server.go:184] freezer state: "THAWED" I0724 21:53:56.924288 55088 api_server.go:221] Checking apiserver healthz at https://172.17.0.4:8443/healthz ... I0724 21:53:56.929249 55088 api_server.go:241] https://172.17.0.4:8443/healthz returned 200: ok I0724 21:53:56.929268 55088 status.go:253] multinode-20200724215222-14997 apiserver status = Running (err=) I0724 21:53:56.929276 55088 status.go:126] multinode-20200724215222-14997 status: &{Name:multinode-20200724215222-14997 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false} I0724 21:53:56.929292 55088 status.go:124] checking status of multinode-20200724215222-14997-m02 ... I0724 21:53:56.929559 55088 cli_runner.go:109] Run: docker container inspect multinode-20200724215222-14997-m02 --format={{.State.Status}} I0724 21:53:56.977930 55088 status.go:188] multinode-20200724215222-14997-m02 host status = "Running" (err=) I0724 21:53:56.977954 55088 host.go:65] Checking if "multinode-20200724215222-14997-m02" exists ... I0724 21:53:56.978227 55088 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-20200724215222-14997-m02 I0724 21:53:57.028521 55088 host.go:65] Checking if "multinode-20200724215222-14997-m02" exists ... I0724 21:53:57.028817 55088 system_pods.go:161] Checking kubelet status ... 
I0724 21:53:57.028866 55088 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service kubelet I0724 21:53:57.028912 55088 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-20200724215222-14997-m02 I0724 21:53:57.080509 55088 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32787 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/multinode-20200724215222-14997-m02/id_rsa Username:docker} I0724 21:53:57.171468 55088 status.go:232] multinode-20200724215222-14997-m02 kubelet status = Running I0724 21:53:57.171492 55088 status.go:126] multinode-20200724215222-14997-m02 status: &{Name:multinode-20200724215222-14997-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true} I0724 21:53:57.171504 55088 status.go:124] checking status of multinode-20200724215222-14997-m03 ... I0724 21:53:57.171785 55088 cli_runner.go:109] Run: docker container inspect multinode-20200724215222-14997-m03 --format={{.State.Status}} I0724 21:53:57.223808 55088 status.go:188] multinode-20200724215222-14997-m03 host status = "Stopped" (err=) I0724 21:53:57.223850 55088 status.go:201] host is not running, skipping remaining checks I0724 21:53:57.223859 55088 status.go:126] multinode-20200724215222-14997-m03 status: &{Name:multinode-20200724215222-14997-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true} ** /stderr ** === RUN TestMultiNode/serial/StartAfterStop multinode_test.go:145: (dbg) Run: docker version -f {{.Server.Version}} multinode_test.go:155: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 node start m03 --alsologtostderr multinode_test.go:155: (dbg) Done: ./minikube-linux-amd64 -p multinode-20200724215222-14997 node start m03 --alsologtostderr: (9.283605353s) multinode_test.go:162: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status multinode_test.go:176: (dbg) Run: kubectl get nodes === RUN TestMultiNode/serial/DeleteNode multinode_test.go:265: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 node delete m03 multinode_test.go:265: (dbg) Done: ./minikube-linux-amd64 -p multinode-20200724215222-14997 node delete m03: (8.116031494s) multinode_test.go:271: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr multinode_test.go:285: (dbg) Run: docker volume ls === RUN TestMultiNode/serial/StopMultiNode multinode_test.go:184: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 stop multinode_test.go:184: (dbg) Done: ./minikube-linux-amd64 -p multinode-20200724215222-14997 stop: (7.376082801s) multinode_test.go:190: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status multinode_test.go:190: (dbg) Non-zero exit: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status: exit status 7 (152.403122ms) -- stdout -- multinode-20200724215222-14997 type: Control Plane host: Stopped kubelet: Stopped apiserver: Stopped kubeconfig: Stopped multinode-20200724215222-14997-m02 type: Worker host: Stopped kubelet: Stopped -- /stdout -- multinode_test.go:197: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr multinode_test.go:197: (dbg) Non-zero exit: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr: exit status 7 (161.999091ms) -- stdout -- multinode-20200724215222-14997 type: Control Plane host: Stopped kubelet: Stopped 
apiserver: Stopped kubeconfig: Stopped multinode-20200724215222-14997-m02 type: Worker host: Stopped kubelet: Stopped -- /stdout -- ** stderr ** I0724 21:54:24.356974 59491 mustload.go:64] Loading cluster: multinode-20200724215222-14997 I0724 21:54:24.357219 59491 status.go:124] checking status of multinode-20200724215222-14997 ... I0724 21:54:24.357637 59491 cli_runner.go:109] Run: docker container inspect multinode-20200724215222-14997 --format={{.State.Status}} I0724 21:54:24.406768 59491 status.go:188] multinode-20200724215222-14997 host status = "Stopped" (err=) I0724 21:54:24.406800 59491 status.go:201] host is not running, skipping remaining checks I0724 21:54:24.406807 59491 status.go:126] multinode-20200724215222-14997 status: &{Name:multinode-20200724215222-14997 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false} I0724 21:54:24.406826 59491 status.go:124] checking status of multinode-20200724215222-14997-m02 ... I0724 21:54:24.407107 59491 cli_runner.go:109] Run: docker container inspect multinode-20200724215222-14997-m02 --format={{.State.Status}} I0724 21:54:24.456985 59491 status.go:188] multinode-20200724215222-14997-m02 host status = "Stopped" (err=) I0724 21:54:24.457013 59491 status.go:201] host is not running, skipping remaining checks I0724 21:54:24.457022 59491 status.go:126] multinode-20200724215222-14997-m02 status: &{Name:multinode-20200724215222-14997-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true} ** /stderr ** === RUN TestMultiNode/serial/RestartMultiNode multinode_test.go:213: (dbg) Run: docker version -f {{.Server.Version}} multinode_test.go:223: (dbg) Run: ./minikube-linux-amd64 start -p multinode-20200724215222-14997 --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot multinode_test.go:223: (dbg) Done: ./minikube-linux-amd64 start -p multinode-20200724215222-14997 --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m15.350574378s) multinode_test.go:229: (dbg) Run: ./minikube-linux-amd64 -p multinode-20200724215222-14997 status --alsologtostderr multinode_test.go:245: (dbg) Run: kubectl get nodes multinode_test.go:253: (dbg) Run: kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'" === CONT TestMultiNode helpers_test.go:170: Cleaning up "multinode-20200724215222-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p multinode-20200724215222-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p multinode-20200724215222-14997: (12.881570018s) --- PASS: TestMultiNode (242.10s) --- PASS: TestMultiNode/serial (229.22s) --- PASS: TestMultiNode/serial/FreshStart2Nodes (70.96s) --- PASS: TestMultiNode/serial/AddNode (21.58s) --- PASS: TestMultiNode/serial/StopNode (2.66s) --- PASS: TestMultiNode/serial/StartAfterStop (10.76s) --- PASS: TestMultiNode/serial/DeleteNode (8.78s) --- PASS: TestMultiNode/serial/StopMultiNode (7.69s) --- PASS: TestMultiNode/serial/RestartMultiNode (106.79s) === RUN TestNetworkPlugins === PAUSE TestNetworkPlugins === RUN TestChangeNoneUser none_test.go:38: Only test none driver. 
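One detail worth noting from the StopNode and StopMultiNode runs above: minikube status still prints per-node state but exits 7 once any host or kubelet is Stopped. A minimal sketch of consuming that convention from Go follows; the profile name "multinode-demo" is hypothetical, and the meaning of exit code 7 is inferred from this log rather than from any documented contract.

// status_probe.go - a minimal sketch, not from the suite above: interpret the
// exit code of "minikube status" the way these tests do, treating exit status 7
// (seen in StopNode/StopMultiNode) as "profile exists but a node is down".
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	out, err := exec.Command("minikube", "-p", "multinode-demo", "status").CombinedOutput()
	fmt.Print(string(out)) // per-node state is printed even on non-zero exit
	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("all nodes running")
	case errors.As(err, &exitErr) && exitErr.ExitCode() == 7:
		fmt.Println("cluster degraded: at least one host/kubelet is Stopped")
	default:
		fmt.Println("status failed:", err)
	}
}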
--- SKIP: TestChangeNoneUser (0.00s) === RUN TestPause === PAUSE TestPause === RUN TestPreload preload_test.go:43: (dbg) Run: ./minikube-linux-amd64 start -p test-preload-20200724215624-14997 --memory=2200 --alsologtostderr --wait=true --preload=false --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.17.0 preload_test.go:43: (dbg) Done: ./minikube-linux-amd64 start -p test-preload-20200724215624-14997 --memory=2200 --alsologtostderr --wait=true --preload=false --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.17.0: (1m21.399403631s) preload_test.go:50: (dbg) Run: ./minikube-linux-amd64 ssh -p test-preload-20200724215624-14997 -- docker pull busybox preload_test.go:50: (dbg) Done: ./minikube-linux-amd64 ssh -p test-preload-20200724215624-14997 -- docker pull busybox: (1.663350352s) preload_test.go:60: (dbg) Run: ./minikube-linux-amd64 start -p test-preload-20200724215624-14997 --memory=2200 --alsologtostderr -v=1 --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.17.3 preload_test.go:60: (dbg) Done: ./minikube-linux-amd64 start -p test-preload-20200724215624-14997 --memory=2200 --alsologtostderr -v=1 --wait=true --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.17.3: (52.104835809s) preload_test.go:64: (dbg) Run: ./minikube-linux-amd64 ssh -p test-preload-20200724215624-14997 -- docker images helpers_test.go:170: Cleaning up "test-preload-20200724215624-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p test-preload-20200724215624-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p test-preload-20200724215624-14997: (4.063996108s) --- PASS: TestPreload (139.60s) === RUN TestSkaffold skaffold_test.go:56: (dbg) Run: ./minikube-linux-amd64 start -p minikube --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot skaffold_test.go:56: (dbg) Done: ./minikube-linux-amd64 start -p minikube --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: (39.269136806s) skaffold_test.go:73: (dbg) Run: which minikube skaffold_test.go:73: (dbg) Run: which docker skaffold_test.go:81: (dbg) Run: /tmp/skaffold.exe823407918 run --kube-context minikube --status-check=true --port-forward=false skaffold_test.go:81: (dbg) Done: /tmp/skaffold.exe823407918 run --kube-context minikube --status-check=true --port-forward=false: (30.329791087s) skaffold_test.go:87: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ... helpers_test.go:332: "leeroy-app-6cdbb5f7bf-bt267" [e065c879-160c-472e-80f5-38cd460b66e1] Running skaffold_test.go:87: (dbg) TestSkaffold: app=leeroy-app healthy within 5.012289515s skaffold_test.go:90: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-web" in namespace "default" ... helpers_test.go:332: "leeroy-web-79ff7b4888-l5pln" [22ae085a-f37a-490e-8809-f72e125b5719] Running skaffold_test.go:90: (dbg) TestSkaffold: app=leeroy-web healthy within 5.005551354s helpers_test.go:170: Cleaning up "minikube" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p minikube helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p minikube: (3.999749226s) --- PASS: TestSkaffold (84.44s) === RUN TestStartStop === PAUSE TestStartStop === RUN TestRunningBinaryUpgrade === PAUSE TestRunningBinaryUpgrade === RUN TestStoppedBinaryUpgrade === PAUSE TestStoppedBinaryUpgrade === RUN TestKubernetesUpgrade === PAUSE TestKubernetesUpgrade === RUN TestMissingContainerUpgrade === PAUSE TestMissingContainerUpgrade === CONT TestCertOptions === CONT TestPause === CONT TestErrorSpam === CONT TestCertOptions cert_options_test.go:46: (dbg) Run: ./minikube-linux-amd64 start -p cert-options-20200724220008-14997 --memory=1900 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --vm-driver=docker --base-image=local/kicbase:-snapshot === RUN TestPause/serial === CONT TestErrorSpam error_spam_test.go:62: (dbg) Run: ./minikube-linux-amd64 start -p nospam-20200724220008-14997 -n=1 --memory=2250 --wait=false --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestMissingContainerUpgrade === RUN TestPause/serial/Start === CONT TestKubernetesUpgrade === CONT TestPause/serial/Start pause_test.go:67: (dbg) Run: ./minikube-linux-amd64 start -p pause-20200724220008-14997 --memory=1800 --install-addons=false --wait=all --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestKubernetesUpgrade version_upgrade_test.go:163: (dbg) Run: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.13.0 --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestNetworkPlugins === RUN TestNetworkPlugins/group === RUN TestNetworkPlugins/group/auto === CONT TestFunctional === PAUSE TestNetworkPlugins/group/auto === RUN TestFunctional/parallel === RUN TestNetworkPlugins/group/kubenet === RUN TestFunctional/parallel/ComponentHealth === CONT TestKVMDriverInstallOrUpdate === PAUSE TestFunctional/parallel/ComponentHealth === CONT TestRunningBinaryUpgrade === RUN TestFunctional/parallel/ConfigCmd === PAUSE TestFunctional/parallel/ConfigCmd === RUN TestFunctional/parallel/DashboardCmd === PAUSE TestFunctional/parallel/DashboardCmd === RUN TestFunctional/parallel/DryRun === PAUSE TestFunctional/parallel/DryRun === RUN TestFunctional/parallel/StatusCmd === PAUSE TestFunctional/parallel/StatusCmd === RUN TestFunctional/parallel/LogsCmd === PAUSE TestFunctional/parallel/LogsCmd === RUN TestFunctional/parallel/MountCmd === PAUSE TestNetworkPlugins/group/kubenet === PAUSE TestFunctional/parallel/MountCmd === RUN TestFunctional/parallel/ProfileCmd === PAUSE TestFunctional/parallel/ProfileCmd === RUN TestNetworkPlugins/group/bridge === RUN TestFunctional/parallel/ServiceCmd === PAUSE TestFunctional/parallel/ServiceCmd === RUN TestFunctional/parallel/AddonsCmd === PAUSE TestFunctional/parallel/AddonsCmd === RUN TestFunctional/parallel/PersistentVolumeClaim === PAUSE TestFunctional/parallel/PersistentVolumeClaim === RUN TestFunctional/parallel/TunnelCmd === PAUSE TestFunctional/parallel/TunnelCmd === RUN TestFunctional/parallel/SSHCmd === PAUSE TestFunctional/parallel/SSHCmd === RUN TestFunctional/parallel/MySQL === PAUSE TestFunctional/parallel/MySQL === PAUSE TestNetworkPlugins/group/bridge === RUN TestFunctional/parallel/FileSync === PAUSE TestFunctional/parallel/FileSync === RUN TestFunctional/parallel/CertSync === PAUSE 
TestFunctional/parallel/CertSync === RUN TestFunctional/parallel/UpdateContextCmd === PAUSE TestFunctional/parallel/UpdateContextCmd === RUN TestFunctional/parallel/DockerEnv === PAUSE TestFunctional/parallel/DockerEnv === RUN TestFunctional/parallel/NodeLabels === PAUSE TestFunctional/parallel/NodeLabels === CONT TestStoppedBinaryUpgrade === RUN TestNetworkPlugins/group/enable-default-cni === PAUSE TestNetworkPlugins/group/enable-default-cni === RUN TestNetworkPlugins/group/flannel net_test.go:66: flannel is not yet compatible with Docker driver: iptables v1.8.3 (legacy): Couldn't load target `CNI-x': No such file or directory === RUN TestNetworkPlugins/group/kindnet === PAUSE TestNetworkPlugins/group/kindnet === RUN TestNetworkPlugins/group/false === PAUSE TestNetworkPlugins/group/false === RUN TestNetworkPlugins/group/custom-weave === PAUSE TestNetworkPlugins/group/custom-weave === RUN TestNetworkPlugins/group/calico === PAUSE TestNetworkPlugins/group/calico === RUN TestNetworkPlugins/group/cilium === PAUSE TestNetworkPlugins/group/cilium === CONT TestStartStop === RUN TestStartStop/group === RUN TestStartStop/group/old-k8s-version === PAUSE TestStartStop/group/old-k8s-version === RUN TestStartStop/group/newest-cni === PAUSE TestStartStop/group/newest-cni === RUN TestStartStop/group/containerd === PAUSE TestStartStop/group/containerd === RUN TestStartStop/group/crio === PAUSE TestStartStop/group/crio === RUN TestStartStop/group/embed-certs === PAUSE TestStartStop/group/embed-certs === CONT TestForceSystemdFlag docker_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p force-systemd-flag-20200724220008-14997 --memory=1800 --force-systemd --alsologtostderr -v=5 --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestStoppedBinaryUpgrade version_upgrade_test.go:133: (dbg) Run: /tmp/minikube-v1.8.0.788345410.exe start -p stopped-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:133: (dbg) Non-zero exit: /tmp/minikube-v1.8.0.788345410.exe start -p stopped-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (50.264579ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. ** /stderr ** > docker-machine-driver-kvm2.sha256: 65 B / 65 B [-------] 100.00% ? p/s 0s
=== CONT TestMissingContainerUpgrade version_upgrade_test.go:245: (dbg) Run: /tmp/minikube-v1.9.1.093451957.exe start -p missing-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:245: (dbg) Non-zero exit: /tmp/minikube-v1.9.1.093451957.exe start -p missing-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (92.827356ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. ** /stderr ** === CONT TestRunningBinaryUpgrade version_upgrade_test.go:90: (dbg) Run: /tmp/minikube-v1.9.0.451153583.exe start -p running-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:90: (dbg) Non-zero exit: /tmp/minikube-v1.9.0.451153583.exe start -p running-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (63.547246ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage.
** /stderr ** === CONT TestMissingContainerUpgrade version_upgrade_test.go:245: (dbg) Run: /tmp/minikube-v1.9.1.093451957.exe start -p missing-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot > docker-machine-driver-kvm2: 883.55 KiB / 48.57 MiB [>_____] 1.78% ? p/s ? version_upgrade_test.go:245: (dbg) Non-zero exit: /tmp/minikube-v1.9.1.093451957.exe start -p missing-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (79.968116ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. ** /stderr ** > docker-machine-driver-kvm2: 1.84 MiB / 48.57 MiB [>_______] 3.79% ? p/s ?
=== CONT TestRunningBinaryUpgrade version_upgrade_test.go:90: (dbg) Run: /tmp/minikube-v1.9.0.451153583.exe start -p running-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:90: (dbg) Non-zero exit: /tmp/minikube-v1.9.0.451153583.exe start -p running-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (68.892395ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. ** /stderr ** === CONT TestStoppedBinaryUpgrade version_upgrade_test.go:133: (dbg) Run: /tmp/minikube-v1.8.0.788345410.exe start -p stopped-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:133: (dbg) Non-zero exit: /tmp/minikube-v1.8.0.788345410.exe start -p stopped-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (72.81865ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. ** /stderr ** > docker-machine-driver-kvm2: 2.34 MiB / 48.57 MiB [>_______] 4.82% ? p/s ? > docker-machine-driver-kvm2: 3.10 MiB / 48.57 MiB 6.39% 2.60 MiB p/s ETA === CONT TestMissingContainerUpgrade version_upgrade_test.go:245: (dbg) Run: /tmp/minikube-v1.9.1.093451957.exe start -p missing-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:245: (dbg) Non-zero exit: /tmp/minikube-v1.9.1.093451957.exe start -p missing-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (76.092364ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage.
** /stderr ** version_upgrade_test.go:251: release start failed: exit status 64 panic.go:617: *** TestMissingContainerUpgrade FAILED at 2020-07-24 22:00:11.374454198 +0000 UTC m=+1441.498818683 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestMissingContainerUpgrade]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect missing-upgrade-20200724220008-14997 > docker-machine-driver-kvm2: 3.34 MiB / 48.57 MiB 6.87% 2.60 MiB p/s ETA helpers_test.go:224: (dbg) Non-zero exit: docker inspect missing-upgrade-20200724220008-14997: exit status 1 (84.669723ms) -- stdout -- [] -- /stdout -- ** stderr ** Error: No such object: missing-upgrade-20200724220008-14997 ** /stderr ** helpers_test.go:226: failed to get docker inspect: exit status 1 helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p missing-upgrade-20200724220008-14997 -n missing-upgrade-20200724220008-14997 helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p missing-upgrade-20200724220008-14997 -n missing-upgrade-20200724220008-14997: exit status 65 (129.193028ms) -- stdout -- * There is no local cluster named "missing-upgrade-20200724220008-14997" - To fix this, run: "minikube start -p missing-upgrade-20200724220008-14997" -- /stdout -- helpers_test.go:232: status error: exit status 65 (may be ok) helpers_test.go:234: "missing-upgrade-20200724220008-14997" host is not running, skipping log retrieval (state="* There is no local cluster named \"missing-upgrade-20200724220008-14997\"\n - To fix this, run: \"minikube start -p missing-upgrade-20200724220008-14997\"") helpers_test.go:170: Cleaning up "missing-upgrade-20200724220008-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p missing-upgrade-20200724220008-14997 === CONT TestStoppedBinaryUpgrade version_upgrade_test.go:133: (dbg) Run: /tmp/minikube-v1.8.0.788345410.exe start -p stopped-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot > docker-machine-driver-kvm2: 3.59 MiB / 48.57 MiB 7.38% 2.60 MiB p/s ETA version_upgrade_test.go:133: (dbg) Non-zero exit: /tmp/minikube-v1.8.0.788345410.exe start -p stopped-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (81.481515ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. 
** /stderr ** version_upgrade_test.go:139: legacy v1.8.0 start failed: exit status 64 panic.go:617: *** TestStoppedBinaryUpgrade FAILED at 2020-07-24 22:00:11.715769364 +0000 UTC m=+1441.840133849 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStoppedBinaryUpgrade]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect stopped-upgrade-20200724220008-14997 === CONT TestRunningBinaryUpgrade version_upgrade_test.go:90: (dbg) Run: /tmp/minikube-v1.9.0.451153583.exe start -p running-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestStoppedBinaryUpgrade helpers_test.go:224: (dbg) Non-zero exit: docker inspect stopped-upgrade-20200724220008-14997: exit status 1 (85.900804ms) -- stdout -- [] -- /stdout -- ** stderr ** Error: No such object: stopped-upgrade-20200724220008-14997 ** /stderr ** helpers_test.go:226: failed to get docker inspect: exit status 1 helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p stopped-upgrade-20200724220008-14997 -n stopped-upgrade-20200724220008-14997 > docker-machine-driver-kvm2: 3.90 MiB / 48.57 MiB 8.03% 2.52 MiB p/s ETA === CONT TestRunningBinaryUpgrade version_upgrade_test.go:90: (dbg) Non-zero exit: /tmp/minikube-v1.9.0.451153583.exe start -p running-upgrade-20200724220008-14997 --memory=2200 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 64 (73.307782ms) ** stderr ** Error: unknown flag: --base-image See 'minikube start --help' for usage. ** /stderr ** version_upgrade_test.go:96: legacy v1.9.0 start failed: exit status 64 panic.go:617: *** TestRunningBinaryUpgrade FAILED at 2020-07-24 22:00:11.875214266 +0000 UTC m=+1441.999578651 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestRunningBinaryUpgrade]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect running-upgrade-20200724220008-14997 === CONT TestStoppedBinaryUpgrade helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p stopped-upgrade-20200724220008-14997 -n stopped-upgrade-20200724220008-14997: exit status 65 (100.053427ms) -- stdout -- * There is no local cluster named "stopped-upgrade-20200724220008-14997" - To fix this, run: "minikube start -p stopped-upgrade-20200724220008-14997" -- /stdout -- helpers_test.go:232: status error: exit status 65 (may be ok) helpers_test.go:234: "stopped-upgrade-20200724220008-14997" host is not running, skipping log retrieval (state="* There is no local cluster named \"stopped-upgrade-20200724220008-14997\"\n - To fix this, run: \"minikube start -p stopped-upgrade-20200724220008-14997\"") helpers_test.go:170: Cleaning up "stopped-upgrade-20200724220008-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p stopped-upgrade-20200724220008-14997 === CONT TestRunningBinaryUpgrade helpers_test.go:224: (dbg) Non-zero exit: docker inspect running-upgrade-20200724220008-14997: exit status 1 (105.590789ms) -- stdout -- [] -- /stdout -- ** stderr ** Error: No such object: running-upgrade-20200724220008-14997 ** /stderr ** helpers_test.go:226: failed to get docker inspect: exit status 1 helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p running-upgrade-20200724220008-14997 -n running-upgrade-20200724220008-14997 --- FAIL: TestMissingContainerUpgrade (3.87s) === CONT TestForceSystemdEnv docker_test.go:108: (dbg) Run: ./minikube-linux-amd64 start -p force-systemd-env-20200724220012-14997 --memory=1800 --alsologtostderr -v=5 --vm-driver=docker --base-image=local/kicbase:-snapshot > docker-machine-driver-kvm2: 4.18 MiB / 48.57 MiB 8.61% 2.52 MiB p/s ETA === CONT TestRunningBinaryUpgrade helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p running-upgrade-20200724220008-14997 -n running-upgrade-20200724220008-14997: exit status 65 (103.627758ms) -- stdout -- * There is no local cluster named "running-upgrade-20200724220008-14997" - To fix this, run: "minikube start -p running-upgrade-20200724220008-14997" -- /stdout -- helpers_test.go:232: status error: exit status 65 (may be ok) helpers_test.go:234: "running-upgrade-20200724220008-14997" host is not running, skipping log retrieval (state="* There is no local cluster named \"running-upgrade-20200724220008-14997\"\n - To fix this, run: \"minikube start -p running-upgrade-20200724220008-14997\"") helpers_test.go:170: Cleaning up "running-upgrade-20200724220008-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p running-upgrade-20200724220008-14997 > docker-machine-driver-kvm2: 4.45 MiB / 48.57 MiB 9.16% 2.52 MiB p/s ETA --- FAIL: TestStoppedBinaryUpgrade (4.18s) === CONT TestDockerFlags docker_test.go:41: (dbg) Run: ./minikube-linux-amd64 start -p docker-flags-20200724220012-14997 --cache-images=false --memory=1800 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --vm-driver=docker --base-image=local/kicbase:-snapshot > docker-machine-driver-kvm2: 4.71 MiB / 48.57 MiB 9.71% 2.44 MiB p/s ETA --- FAIL: TestRunningBinaryUpgrade (4.33s) === CONT TestFunctional/parallel/ComponentHealth functional_test.go:314: (dbg) Run: kubectl --context functional-20200724215019-14997 get cs -o=json === CONT TestFunctional/parallel/PersistentVolumeClaim fn_pvc_test.go:42: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ... 
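All three binary-upgrade tests fail identically: the legacy binaries (v1.8.0, v1.9.0, v1.9.1) predate --base-image and exit 64 with "unknown flag". A minimal sketch of guarding a flag behind a capability probe follows, assuming only that an unsupported flag is absent from the binary's start --help text; supportsFlag is a hypothetical helper, not part of the test suite.

// flag_probe.go - a minimal sketch: the legacy binaries above exit 64 on the
// unknown --base-image flag, so probe `start --help` before appending it.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// supportsFlag reports whether `bin start --help` mentions the given flag.
// (Assumption: supported flags appear in the help text.)
func supportsFlag(bin, flag string) bool {
	out, _ := exec.Command(bin, "start", "--help").CombinedOutput()
	return strings.Contains(string(out), flag)
}

func main() {
	bin := "/tmp/minikube-v1.8.0.788345410.exe" // legacy binary from the log above
	args := []string{"start", "-p", "stopped-upgrade-20200724220008-14997", "--memory=2200", "--vm-driver=docker"}
	if supportsFlag(bin, "--base-image") {
		args = append(args, "--base-image=local/kicbase:-snapshot")
	}
	fmt.Println(bin, strings.Join(args, " ")) // print the command that would be run
}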
helpers_test.go:332: "storage-provisioner" [61fa82d5-273f-4a82-9cd2-27c2a53a248a] Running > docker-machine-driver-kvm2: 4.93 MiB / 48.57 MiB 10.15% 2.44 MiB p/s ETA > docker-machine-driver-kvm2: 5.18 MiB / 48.57 MiB 10.66% 2.44 MiB p/s ETA > docker-machine-driver-kvm2: 5.48 MiB / 48.57 MiB 11.28% 2.37 MiB p/s ETA > docker-machine-driver-kvm2: 5.76 MiB / 48.57 MiB 11.86% 2.37 MiB p/s ETA > docker-machine-driver-kvm2: 6.08 MiB / 48.57 MiB 12.51% 2.37 MiB p/s ETA > docker-machine-driver-kvm2: 6.44 MiB / 48.57 MiB 13.26% 2.32 MiB p/s ETA > docker-machine-driver-kvm2: 6.84 MiB / 48.57 MiB 14.08% 2.32 MiB p/s ETA > docker-machine-driver-kvm2: 7.24 MiB / 48.57 MiB 14.90% 2.32 MiB p/s ETA > docker-machine-driver-kvm2: 7.65 MiB / 48.57 MiB 15.76% 2.30 MiB p/s ETA > docker-machine-driver-kvm2: 8.12 MiB / 48.57 MiB 16.71% 2.30 MiB p/s ETA > docker-machine-driver-kvm2: 8.65 MiB / 48.57 MiB 17.81% 2.30 MiB p/s ETA > docker-machine-driver-kvm2: 9.15 MiB / 48.57 MiB 18.83% 2.31 MiB p/s ETA > docker-machine-driver-kvm2: 9.68 MiB / 48.57 MiB 19.92% 2.31 MiB p/s ETA > docker-machine-driver-kvm2: 10.23 MiB / 48.57 MiB 21.05% 2.31 MiB p/s ET > docker-machine-driver-kvm2: 10.91 MiB / 48.57 MiB 22.45% 2.35 MiB p/s ET > docker-machine-driver-kvm2: 11.32 MiB / 48.57 MiB 23.31% 2.35 MiB p/s ET > docker-machine-driver-kvm2: 11.95 MiB / 48.57 MiB 24.61% 2.35 MiB p/s ET > docker-machine-driver-kvm2: 12.60 MiB / 48.57 MiB 25.94% 2.38 MiB p/s ET > docker-machine-driver-kvm2: 13.28 MiB / 48.57 MiB 27.34% 2.38 MiB p/s ET > docker-machine-driver-kvm2: 13.98 MiB / 48.57 MiB 28.78% 2.38 MiB p/s ET > docker-machine-driver-kvm2: 14.81 MiB / 48.57 MiB 30.49% 2.47 MiB p/s ET > docker-machine-driver-kvm2: 15.56 MiB / 48.57 MiB 32.02% 2.47 MiB p/s ET > docker-machine-driver-kvm2: 16.34 MiB / 48.57 MiB 33.63% 2.47 MiB p/s ET > docker-machine-driver-kvm2: 17.08 MiB / 48.57 MiB 35.17% 2.55 MiB p/s ET > docker-machine-driver-kvm2: 17.96 MiB / 48.57 MiB 36.98% 2.55 MiB p/s ET fn_pvc_test.go:42: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 5.015254084s fn_pvc_test.go:47: (dbg) Run: kubectl --context functional-20200724215019-14997 get storageclass -o=json > docker-machine-driver-kvm2: 18.89 MiB / 48.57 MiB 38.89% 2.55 MiB p/s ET fn_pvc_test.go:67: (dbg) Run: kubectl --context functional-20200724215019-14997 apply -f testdata/pvc.yaml > docker-machine-driver-kvm2: 19.77 MiB / 48.57 MiB 40.71% 2.68 MiB p/s ET > docker-machine-driver-kvm2: 20.67 MiB / 48.57 MiB 42.55% 2.68 MiB p/s ET > docker-machine-driver-kvm2: 21.55 MiB / 48.57 MiB 44.36% 2.68 MiB p/s ET > docker-machine-driver-kvm2: 22.58 MiB / 48.57 MiB 46.48% 2.81 MiB p/s ET > docker-machine-driver-kvm2: 23.64 MiB / 48.57 MiB 48.67% 2.81 MiB p/s ET > docker-machine-driver-kvm2: 24.64 MiB / 48.57 MiB 50.72% 2.81 MiB p/s ET > docker-machine-driver-kvm2: 25.67 MiB / 48.57 MiB 52.84% 2.96 MiB p/s ET > docker-machine-driver-kvm2: 26.71 MiB / 48.57 MiB 54.99% 2.96 MiB p/s ET > docker-machine-driver-kvm2: 27.86 MiB / 48.57 MiB 57.35% 2.96 MiB p/s ET > docker-machine-driver-kvm2: 28.80 MiB / 48.57 MiB 59.30% 3.10 MiB p/s ET > docker-machine-driver-kvm2: 29.92 MiB / 48.57 MiB 61.59% 3.10 MiB p/s ET > docker-machine-driver-kvm2: 31.04 MiB / 48.57 MiB 63.91% 3.10 MiB p/s ET > docker-machine-driver-kvm2: 32.21 MiB / 48.57 MiB 66.30% 3.27 MiB p/s ET > docker-machine-driver-kvm2: 33.39 MiB / 48.57 MiB 68.73% 3.27 MiB p/s ET > docker-machine-driver-kvm2: 34.80 MiB / 48.57 MiB 71.64% 3.27 MiB p/s ET > docker-machine-driver-kvm2: 36.02 MiB 
/ 48.57 MiB 74.17% 3.47 MiB p/s ET > docker-machine-driver-kvm2: 37.29 MiB / 48.57 MiB 76.76% 3.47 MiB p/s ET > docker-machine-driver-kvm2: 38.56 MiB / 48.57 MiB 79.39% 3.47 MiB p/s ET > docker-machine-driver-kvm2: 39.88 MiB / 48.57 MiB 82.09% 3.66 MiB p/s ET > docker-machine-driver-kvm2: 41.44 MiB / 48.57 MiB 85.31% 3.66 MiB p/s ET > docker-machine-driver-kvm2: 42.80 MiB / 48.57 MiB 88.11% 3.66 MiB p/s ET > docker-machine-driver-kvm2: 44.19 MiB / 48.57 MiB 90.98% 3.89 MiB p/s ET > docker-machine-driver-kvm2: 45.60 MiB / 48.57 MiB 93.89% 3.89 MiB p/s ET > docker-machine-driver-kvm2: 47.05 MiB / 48.57 MiB 96.86% 3.89 MiB p/s ET > docker-machine-driver-kvm2: 48.57 MiB / 48.57 MiB 100.00% 3.86 MiB p/s 1 fn_pvc_test.go:67: (dbg) Done: kubectl --context functional-20200724215019-14997 apply -f testdata/pvc.yaml: (5.61645379s) fn_pvc_test.go:73: (dbg) Run: kubectl --context functional-20200724215019-14997 get pvc testpvc -o=json > docker-machine-driver-kvm2.sha256: 65 B / 65 B [-------] 100.00% ? p/s 0s > docker-machine-driver-kvm2: 1.52 MiB / 48.57 MiB [>_______] 3.12% ? p/s ? > docker-machine-driver-kvm2: 4.42 MiB / 48.57 MiB [>_______] 9.11% ? p/s ? > docker-machine-driver-kvm2: 7.66 MiB / 48.57 MiB [->_____] 15.77% ? p/s ? fn_pvc_test.go:73: (dbg) Run: kubectl --context functional-20200724215019-14997 get pvc testpvc -o=json === CONT TestFunctional/parallel/NodeLabels functional_test.go:141: (dbg) Run: kubectl --context functional-20200724215019-14997 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'" === CONT TestFunctional/parallel/DockerEnv functional_test.go:166: (dbg) Run: /bin/bash -c "eval $(./minikube-linux-amd64 -p functional-20200724215019-14997 docker-env) && ./minikube-linux-amd64 status -p functional-20200724215019-14997" > docker-machine-driver-kvm2: 11.02 MiB / 48.57 MiB 22.68% 15.82 MiB p/s E > docker-machine-driver-kvm2: 14.19 MiB / 48.57 MiB 29.21% 15.82 MiB p/s E > docker-machine-driver-kvm2: 17.67 MiB / 48.57 MiB 36.38% 15.82 MiB p/s E > docker-machine-driver-kvm2: 21.11 MiB / 48.57 MiB 43.46% 15.88 MiB p/s E > docker-machine-driver-kvm2: 24.72 MiB / 48.57 MiB 50.89% 15.88 MiB p/s E functional_test.go:186: (dbg) Run: /bin/bash -c "eval $(./minikube-linux-amd64 -p functional-20200724215019-14997 docker-env) && docker images" > docker-machine-driver-kvm2: 28.35 MiB / 48.57 MiB 58.36% 15.88 MiB p/s E > docker-machine-driver-kvm2: 32.22 MiB / 48.57 MiB 66.33% 16.05 MiB p/s E > docker-machine-driver-kvm2: 35.88 MiB / 48.57 MiB 73.86% 16.05 MiB p/s E > docker-machine-driver-kvm2: 39.56 MiB / 48.57 MiB 81.45% 16.05 MiB p/s E > docker-machine-driver-kvm2: 43.61 MiB / 48.57 MiB 89.78% 16.24 MiB p/s E > docker-machine-driver-kvm2: 47.56 MiB / 48.57 MiB 97.92% 16.24 MiB p/s E > docker-machine-driver-kvm2: 48.57 MiB / 48.57 MiB 100.00% 18.33 MiB p/s --- PASS: TestKVMDriverInstallOrUpdate (19.23s) === CONT TestFunctional/parallel/UpdateContextCmd functional_test.go:1007: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 update-context --alsologtostderr -v=2 === CONT TestFunctional/parallel/DockerEnv functional_test.go:186: (dbg) Done: /bin/bash -c "eval $(./minikube-linux-amd64 -p functional-20200724215019-14997 docker-env) && docker images": (1.852837443s) === CONT TestFunctional/parallel/CertSync functional_test.go:989: Checking for existence of /etc/ssl/certs/14997.pem within VM functional_test.go:990: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo cat 
/etc/ssl/certs/14997.pem" === CONT TestFunctional/parallel/UpdateContextCmd functional_test.go:1007: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 update-context --alsologtostderr -v=2: (4.690614622s) === CONT TestFunctional/parallel/FileSync functional_test.go:950: Checking for existence of /etc/test/nested/copy/14997/hosts within VM functional_test.go:951: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo cat /etc/test/nested/copy/14997/hosts" === CONT TestFunctional/parallel/CertSync functional_test.go:990: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo cat /etc/ssl/certs/14997.pem": (7.072372795s) functional_test.go:989: Checking for existence of /usr/share/ca-certificates/14997.pem within VM functional_test.go:990: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo cat /usr/share/ca-certificates/14997.pem" === CONT TestFunctional/parallel/FileSync functional_test.go:951: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo cat /etc/test/nested/copy/14997/hosts": (2.817636371s) functional_test.go:956: file sync test content: Test file for checking file sync process === CONT TestFunctional/parallel/MySQL functional_test.go:855: (dbg) Run: kubectl --context functional-20200724215019-14997 replace --force -f testdata/mysql.yaml functional_test.go:860: (dbg) TestFunctional/parallel/MySQL: waiting 10m0s for pods matching "app=mysql" in namespace "default" ... === CONT TestFunctional/parallel/CertSync functional_test.go:989: Checking for existence of /etc/ssl/certs/51391683.0 within VM functional_test.go:990: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo cat /etc/ssl/certs/51391683.0" === CONT TestFunctional/parallel/SSHCmd functional_test.go:820: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "echo hello" functional_test.go:837: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "cat /etc/hostname" === CONT TestFunctional/parallel/MySQL helpers_test.go:332: "mysql-78ff7d6cf9-znqtq" [cf79cb38-8c8f-4043-83af-b92f1266dc2c] Pending / Ready:ContainersNotReady (containers with unready status: [mysql]) / ContainersReady:ContainersNotReady (containers with unready status: [mysql]) === CONT TestFunctional/parallel/TunnelCmd === RUN TestFunctional/parallel/TunnelCmd/serial === RUN TestFunctional/parallel/TunnelCmd/serial/StartTunnel fn_tunnel_cmd_test.go:122: (dbg) daemon: [./minikube-linux-amd64 -p functional-20200724215019-14997 tunnel --alsologtostderr] === RUN TestFunctional/parallel/TunnelCmd/serial/WaitService fn_tunnel_cmd_test.go:142: (dbg) Run: kubectl --context functional-20200724215019-14997 apply -f testdata/testsvc.yaml fn_tunnel_cmd_test.go:146: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService: waiting 4m0s for pods matching "run=nginx-svc" in namespace "default" ... 
helpers_test.go:332: "nginx-svc" [b7f77eae-2c2b-48e6-bae1-3fe98960fa83] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx]) === CONT TestFunctional/parallel/MySQL helpers_test.go:332: "mysql-78ff7d6cf9-znqtq" [cf79cb38-8c8f-4043-83af-b92f1266dc2c] Running === CONT TestFunctional/parallel/TunnelCmd/serial/WaitService helpers_test.go:332: "nginx-svc" [b7f77eae-2c2b-48e6-bae1-3fe98960fa83] Running === CONT TestFunctional/parallel/MySQL functional_test.go:860: (dbg) TestFunctional/parallel/MySQL: app=mysql healthy within 31.831396805s functional_test.go:867: (dbg) Run: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;" === CONT TestFunctional/parallel/TunnelCmd/serial/WaitService fn_tunnel_cmd_test.go:146: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService: run=nginx-svc healthy within 30.527023809s === RUN TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP fn_tunnel_cmd_test.go:160: (dbg) Run: kubectl --context functional-20200724215019-14997 get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip} === RUN TestFunctional/parallel/TunnelCmd/serial/AccessDirect fn_tunnel_cmd_test.go:225: tunnel at http://10.96.171.208 is working! === RUN TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig fn_tunnel_cmd_test.go:92: DNS forwarding is supported for darwin only now, skipping test DNS forwarding === RUN TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil fn_tunnel_cmd_test.go:92: DNS forwarding is supported for darwin only now, skipping test DNS forwarding === RUN TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS fn_tunnel_cmd_test.go:92: DNS forwarding is supported for darwin only now, skipping test DNS forwarding === RUN TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel fn_tunnel_cmd_test.go:360: (dbg) stopping [./minikube-linux-amd64 -p functional-20200724215019-14997 tunnel --alsologtostderr] ... === CONT TestFunctional/parallel/MySQL functional_test.go:867: (dbg) Non-zero exit: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;": exit status 1 (241.275181ms) ** stderr ** Warning: Using a password on the command line interface can be insecure. ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2) command terminated with exit code 1 ** /stderr ** === CONT TestFunctional/parallel/LogsCmd functional_test.go:566: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 logs === CONT TestFunctional/parallel/MySQL functional_test.go:867: (dbg) Run: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;" functional_test.go:867: (dbg) Non-zero exit: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;": exit status 1 (187.3712ms) ** stderr ** Warning: Using a password on the command line interface can be insecure. 
ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2) command terminated with exit code 1 ** /stderr ** functional_test.go:867: (dbg) Run: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;" functional_test.go:867: (dbg) Non-zero exit: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;": exit status 1 (1.780305568s) ** stderr ** Warning: Using a password on the command line interface can be insecure. ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2) command terminated with exit code 1 ** /stderr ** functional_test.go:867: (dbg) Run: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;" functional_test.go:867: (dbg) Non-zero exit: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;": exit status 1 (247.845806ms) ** stderr ** Warning: Using a password on the command line interface can be insecure. ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES) command terminated with exit code 1 ** /stderr ** === CONT TestFunctional/parallel/LogsCmd functional_test.go:566: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 logs: (6.877713844s) === CONT TestFunctional/parallel/AddonsCmd functional_test.go:787: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 addons list functional_test.go:798: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 addons list -o json === CONT TestFunctional/parallel/ServiceCmd functional_test.go:681: (dbg) Run: kubectl --context functional-20200724215019-14997 create deployment hello-node --image=k8s.gcr.io/echoserver:1.4 functional_test.go:685: (dbg) Run: kubectl --context functional-20200724215019-14997 expose deployment hello-node --type=NodePort --port=8080 functional_test.go:690: (dbg) TestFunctional/parallel/ServiceCmd: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ... helpers_test.go:332: "hello-node-7bf657c596-zl77l" [22f4b6d7-1b1a-49d4-a543-a85307089545] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver]) === CONT TestFunctional/parallel/MySQL functional_test.go:867: (dbg) Run: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;" functional_test.go:867: (dbg) Non-zero exit: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;": exit status 1 (1.123697082s) ** stderr ** Warning: Using a password on the command line interface can be insecure. 
ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2) command terminated with exit code 1 ** /stderr ** functional_test.go:867: (dbg) Run: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;" functional_test.go:867: (dbg) Done: kubectl --context functional-20200724215019-14997 exec mysql-78ff7d6cf9-znqtq -- mysql -ppassword -e "show databases;": (3.697611794s) === CONT TestFunctional/parallel/ProfileCmd === RUN TestFunctional/parallel/ProfileCmd/profile_not_create functional_test.go:582: (dbg) Run: ./minikube-linux-amd64 profile lis functional_test.go:586: (dbg) Run: ./minikube-linux-amd64 profile list --output json === RUN TestFunctional/parallel/ProfileCmd/profile_list functional_test.go:607: (dbg) Run: ./minikube-linux-amd64 profile list === RUN TestFunctional/parallel/ProfileCmd/profile_json_output functional_test.go:629: (dbg) Run: ./minikube-linux-amd64 profile list --output json === CONT TestFunctional/parallel/DryRun functional_test.go:430: (dbg) Run: ./minikube-linux-amd64 start -p functional-20200724215019-14997 --dry-run --memory 250MB --alsologtostderr --vm-driver=docker --base-image=local/kicbase:-snapshot functional_test.go:430: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p functional-20200724215019-14997 --dry-run --memory 250MB --alsologtostderr --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 78 (220.794867ms) -- stdout -- * [functional-20200724215019-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome * Using the docker driver based on existing profile -- /stdout -- ** stderr ** I0724 22:01:27.809921 102610 out.go:188] Setting JSON to false I0724 22:01:27.812711 102610 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":1726,"bootTime":1595626361,"procs":661,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 22:01:27.813387 102610 start.go:111] virtualization: kvm host I0724 22:01:27.824264 102610 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:01:27.888624 102610 docker.go:87] docker version: linux-19.03.8 I0724 22:01:27.900725 102610 start.go:217] selected driver: docker I0724 22:01:27.900757 102610 start.go:623] validating driver "docker" against &{Name:functional-20200724215019-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:functional-20200724215019-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: 
FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8441 NodeName:} Nodes:[{Name: IP:172.17.0.3 Port:8441 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[ambassador:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false helm-tiller:false ingress:false ingress-dns:false istio:false istio-provisioner:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:01:27.901518 102610 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 22:01:27.901636 102610 cli_runner.go:109] Run: docker system info --format "{{json .}}" X Requested memory allocation 250MB is less than the usable minimum of MB ** /stderr ** functional_test.go:441: (dbg) Run: ./minikube-linux-amd64 start -p functional-20200724215019-14997 --dry-run --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestFunctional/parallel/MountCmd fn_mount_cmd_test.go:72: (dbg) daemon: [./minikube-linux-amd64 mount -p functional-20200724215019-14997 /tmp/mounttest555612822:/mount-9p --alsologtostderr -v=1] fn_mount_cmd_test.go:106: wrote "test-1595628088180752813" to /tmp/mounttest555612822/created-by-test fn_mount_cmd_test.go:106: wrote "test-1595628088180752813" to /tmp/mounttest555612822/created-by-test-removed-by-pod fn_mount_cmd_test.go:106: wrote "test-1595628088180752813" to /tmp/mounttest555612822/test-1595628088180752813 fn_mount_cmd_test.go:114: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "findmnt -T /mount-9p | grep 9p" fn_mount_cmd_test.go:114: (dbg) Non-zero exit: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (363.111764ms) ** stderr ** ssh: Process exited with status 1 ** /stderr ** fn_mount_cmd_test.go:114: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "findmnt -T /mount-9p | grep 9p" fn_mount_cmd_test.go:128: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh -- ls -la /mount-9p fn_mount_cmd_test.go:132: guest mount directory contents total 2 -rw-r--r-- 1 docker docker 24 Jul 24 22:01 created-by-test -rw-r--r-- 1 docker docker 24 Jul 24 22:01 created-by-test-removed-by-pod -rw-r--r-- 1 docker docker 24 Jul 24 22:01 test-1595628088180752813 fn_mount_cmd_test.go:136: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh cat /mount-9p/test-1595628088180752813 fn_mount_cmd_test.go:147: (dbg) Run: kubectl --context functional-20200724215019-14997 replace --force -f testdata/busybox-mount-test.yaml fn_mount_cmd_test.go:152: (dbg) TestFunctional/parallel/MountCmd: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ... 
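
The MountCmd records around here boil down to a round-trip check: write files into the host directory backing the 9p mount, then read them back through minikube ssh at the guest path and compare. A rough, self-contained sketch of that verification step follows; the helper name checkMount and the hard-coded paths are illustrative, not the actual fn_mount_cmd_test.go code (which also checks ownership and timestamps via stat, as seen below).

// mountcheck.go: illustrative round-trip check for a 9p mount, in the spirit
// of the MountCmd test in this log; helper and paths are hypothetical.
package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"time"
)

// checkMount writes a file on the host side of the mount and reads it back
// through the guest side via `minikube ssh cat`.
func checkMount(profile, hostDir, guestDir string) error {
	// Nanosecond timestamp payload, like the test-1595628088180752813 files above.
	payload := []byte(fmt.Sprintf("test-%d", time.Now().UnixNano()))
	name := "created-by-test"
	if err := os.WriteFile(filepath.Join(hostDir, name), payload, 0o644); err != nil {
		return fmt.Errorf("host write failed: %w", err)
	}
	out, err := exec.Command("./minikube-linux-amd64", "-p", profile,
		"ssh", "cat", filepath.Join(guestDir, name)).Output()
	if err != nil {
		return fmt.Errorf("guest read failed: %w", err)
	}
	if !bytes.Equal(bytes.TrimSpace(out), payload) {
		return fmt.Errorf("mount mismatch: wrote %q, read %q", payload, out)
	}
	return nil
}

func main() {
	fmt.Println(checkMount("functional-20200724215019-14997", "/tmp/mounttest", "/mount-9p"))
}
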
helpers_test.go:332: "busybox-mount" [9e595afc-beb1-4aee-81b1-b868ac5efad0] Pending helpers_test.go:332: "busybox-mount" [9e595afc-beb1-4aee-81b1-b868ac5efad0] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger]) === CONT TestFunctional/parallel/ServiceCmd helpers_test.go:332: "hello-node-7bf657c596-zl77l" [22f4b6d7-1b1a-49d4-a543-a85307089545] Running === CONT TestFunctional/parallel/MountCmd helpers_test.go:332: "busybox-mount" [9e595afc-beb1-4aee-81b1-b868ac5efad0] Succeeded: Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted fn_mount_cmd_test.go:152: (dbg) TestFunctional/parallel/MountCmd: integration-test=busybox-mount healthy within 3.068490731s fn_mount_cmd_test.go:168: (dbg) Run: kubectl --context functional-20200724215019-14997 logs busybox-mount fn_mount_cmd_test.go:180: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh stat /mount-9p/created-by-test fn_mount_cmd_test.go:180: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh stat /mount-9p/created-by-test: (2.801014591s) fn_mount_cmd_test.go:180: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh stat /mount-9p/created-by-pod === CONT TestFunctional/parallel/ServiceCmd functional_test.go:690: (dbg) TestFunctional/parallel/ServiceCmd: app=hello-node healthy within 24.01208036s functional_test.go:694: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 service list === CONT TestFunctional/parallel/MountCmd fn_mount_cmd_test.go:180: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh stat /mount-9p/created-by-pod: (7.254428084s) fn_mount_cmd_test.go:89: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 ssh "sudo umount -f /mount-9p" === CONT TestFunctional/parallel/ServiceCmd functional_test.go:694: (dbg) Done: ./minikube-linux-amd64 -p functional-20200724215019-14997 service list: (5.244947668s) functional_test.go:707: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 service --namespace=default --https --url hello-node === CONT TestFunctional/parallel/MountCmd fn_mount_cmd_test.go:93: (dbg) stopping [./minikube-linux-amd64 mount -p functional-20200724215019-14997 /tmp/mounttest555612822:/mount-9p --alsologtostderr -v=1] ... === CONT TestFunctional/parallel/ServiceCmd functional_test.go:716: found endpoint: https://172.17.0.3:31081 functional_test.go:727: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 service hello-node --url --format={{.IP}} === CONT TestFunctional/parallel/StatusCmd functional_test.go:340: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 status === CONT TestFunctional/parallel/ServiceCmd functional_test.go:736: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 service hello-node --url === CONT TestFunctional/parallel/StatusCmd functional_test.go:346: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}} === CONT TestFunctional/parallel/ServiceCmd functional_test.go:742: found endpoint for hello-node: http://172.17.0.3:31081 functional_test.go:753: Attempting to fetch http://172.17.0.3:31081 ... functional_test.go:772: http://172.17.0.3:31081: success! 
body: CLIENT VALUES: client_address=172.18.0.1 command=GET real path=/ query=nil request_version=1.1 request_uri=http://172.17.0.3:8080/ SERVER VALUES: server_version=nginx: 1.10.0 - lua: 10001 HEADERS RECEIVED: accept-encoding=gzip host=172.17.0.3:31081 user-agent=Go-http-client/1.1 BODY: -no body in request- === CONT TestFunctional/parallel/DashboardCmd functional_test.go:385: (dbg) daemon: [./minikube-linux-amd64 dashboard --url -p functional-20200724215019-14997 --alsologtostderr -v=1] === CONT TestFunctional/parallel/StatusCmd functional_test.go:357: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 status -o json === CONT TestFunctional/parallel/ConfigCmd === CONT TestFunctional/parallel/ConfigCmd functional_test.go:548: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 config unset cpus functional_test.go:548: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 config get cpus functional_test.go:548: (dbg) Non-zero exit: ./minikube-linux-amd64 -p functional-20200724215019-14997 config get cpus: exit status 64 (67.066491ms) ** stderr ** Error: specified key could not be found in config ** /stderr ** functional_test.go:548: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 config set cpus 2 functional_test.go:548: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 config get cpus functional_test.go:548: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 config unset cpus functional_test.go:548: (dbg) Run: ./minikube-linux-amd64 -p functional-20200724215019-14997 config get cpus functional_test.go:548: (dbg) Non-zero exit: ./minikube-linux-amd64 -p functional-20200724215019-14997 config get cpus: exit status 64 (70.70183ms) ** stderr ** Error: specified key could not be found in config ** /stderr ** === CONT TestNetworkPlugins/group/auto === RUN TestNetworkPlugins/group/auto/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p auto-20200724220146-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestPause/serial/Start pause_test.go:67: (dbg) Done: ./minikube-linux-amd64 start -p pause-20200724220008-14997 --memory=1800 --install-addons=false --wait=all --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m38.154470758s) === RUN TestPause/serial/SecondStartNoReconfiguration pause_test.go:78: (dbg) Run: ./minikube-linux-amd64 start -p pause-20200724220008-14997 --alsologtostderr -v=1 === CONT TestCertOptions cert_options_test.go:46: (dbg) Done: ./minikube-linux-amd64 start -p cert-options-20200724220008-14997 --memory=1900 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m43.989271576s) cert_options_test.go:57: (dbg) Run: ./minikube-linux-amd64 -p cert-options-20200724220008-14997 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt" cert_options_test.go:72: (dbg) Run: kubectl --context cert-options-20200724220008-14997 config view helpers_test.go:170: Cleaning up "cert-options-20200724220008-14997" profile ... 
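
cert_options_test.go above validates the generated apiserver certificate by dumping it with openssl x509 and looking for the requested --apiserver-names and --apiserver-ips. The same assertion can be made natively in Go; the sketch below (a hypothetical certsan.go, not the test's actual implementation) parses a PEM certificate and checks its subject alternative names, assuming apiserver.crt has already been copied out of the node.

// certsan.go: illustrative SAN check for an apiserver certificate, mirroring
// what cert_options_test.go asserts via `openssl x509 -text -noout`.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"net"
	"os"
)

// hasSANs reports whether the certificate lists both the wanted DNS name
// and the wanted IP among its subject alternative names.
func hasSANs(certPEM []byte, wantDNS string, wantIP net.IP) (bool, error) {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return false, fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	dnsOK, ipOK := false, false
	for _, d := range cert.DNSNames {
		if d == wantDNS {
			dnsOK = true
		}
	}
	for _, ip := range cert.IPAddresses {
		if ip.Equal(wantIP) {
			ipOK = true
		}
	}
	return dnsOK && ipOK, nil
}

func main() {
	data, err := os.ReadFile("apiserver.crt")
	if err != nil {
		fmt.Println(err)
		return
	}
	ok, err := hasSANs(data, "www.google.com", net.ParseIP("192.168.15.15"))
	fmt.Println(ok, err)
}
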
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p cert-options-20200724220008-14997 === CONT TestErrorSpam error_spam_test.go:62: (dbg) Done: ./minikube-linux-amd64 start -p nospam-20200724220008-14997 -n=1 --memory=2250 --wait=false --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m45.025672049s) error_spam_test.go:77: unexpected stderr: "! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image" error_spam_test.go:91: minikube stdout: * [nospam-20200724220008-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome * Using the docker driver based on user configuration * Starting control plane node nospam-20200724220008-14997 in cluster nospam-20200724220008-14997 * Pulling base image ... * Creating docker container (CPUs=2, Memory=2250MB) ... * Preparing Kubernetes v1.18.3 on Docker 19.03.2 ... * Verifying Kubernetes components... * Enabled addons: default-storageclass, storage-provisioner * Done! kubectl is now configured to use "nospam-20200724220008-14997" error_spam_test.go:92: minikube stderr: ! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image error_spam_test.go:94: *** TestErrorSpam FAILED at 2020-07-24 22:01:53.200134294 +0000 UTC m=+1543.324498779 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestErrorSpam]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect nospam-20200724220008-14997 helpers_test.go:228: (dbg) docker inspect nospam-20200724220008-14997: -- stdout -- [ { "Id": "f09d6d19ab116a492ec839c6059d22702635d404054b49af9ca12cdd31288149", "Created": "2020-07-24T22:00:58.493430925Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 96020, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:00:59.259002861Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/f09d6d19ab116a492ec839c6059d22702635d404054b49af9ca12cdd31288149/resolv.conf", "HostnamePath": "/var/lib/docker/containers/f09d6d19ab116a492ec839c6059d22702635d404054b49af9ca12cdd31288149/hostname", "HostsPath": "/var/lib/docker/containers/f09d6d19ab116a492ec839c6059d22702635d404054b49af9ca12cdd31288149/hosts", "LogPath": "/var/lib/docker/containers/f09d6d19ab116a492ec839c6059d22702635d404054b49af9ca12cdd31288149/f09d6d19ab116a492ec839c6059d22702635d404054b49af9ca12cdd31288149-json.log", "Name": "/nospam-20200724220008-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "nospam-20200724220008-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { 
"Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2359296000, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/c91c07a9161b581274fcccd28527d34fa71cf9028b4a215b9d88759590abe51f-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a98
82009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/c91c07a9161b581274fcccd28527d34fa71cf9028b4a215b9d88759590abe51f/merged", "UpperDir": "/var/lib/docker/overlay2/c91c07a9161b581274fcccd28527d34fa71cf9028b4a215b9d88759590abe51f/diff", "WorkDir": "/var/lib/docker/overlay2/c91c07a9161b581274fcccd28527d34fa71cf9028b4a215b9d88759590abe51f/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "nospam-20200724220008-14997", "Source": "/var/lib/docker/volumes/nospam-20200724220008-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "nospam-20200724220008-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "nospam-20200724220008-14997", "name.minikube.sigs.k8s.io": "nospam-20200724220008-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "df93619afc86bb4eebae4c70b8b8896a0bb12dd69e62622b3172086801bccc4e", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32824" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32823" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32822" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32821" } ] }, "SandboxKey": "/var/run/docker/netns/df93619afc86", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "6f2f423ea595e056cfa679a8d67c34d0cf1649e3709ae9be811bd399ba31025f", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.6", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:06", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "6f2f423ea595e056cfa679a8d67c34d0cf1649e3709ae9be811bd399ba31025f", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.6", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:06", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p nospam-20200724220008-14997 -n nospam-20200724220008-14997 helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p nospam-20200724220008-14997 -n nospam-20200724220008-14997: exit status 2 (7.080329797s) -- stdout -- Running -- /stdout -- ** stderr ** E0724 22:02:00.359265 112528 status.go:256] Error apiserver status: https://172.17.0.6:8443/healthz returned error 500: [+]ping ok [+]log ok [-]etcd failed: reason withheld [+]poststarthook/start-kube-apiserver-admission-initializer ok 
[+]poststarthook/generic-apiserver-start-informers ok [+]poststarthook/start-apiextensions-informers ok [+]poststarthook/start-apiextensions-controllers ok [+]poststarthook/crd-informer-synced ok [+]poststarthook/bootstrap-controller ok [+]poststarthook/rbac/bootstrap-roles ok [+]poststarthook/scheduling/bootstrap-system-priority-classes ok [+]poststarthook/start-cluster-authentication-info-controller ok [+]poststarthook/start-kube-aggregator-informers ok [+]poststarthook/apiservice-registration-controller ok [+]poststarthook/apiservice-status-available-controller ok [+]poststarthook/kube-apiserver-autoregistration ok [+]autoregister-completion ok [+]poststarthook/apiservice-openapi-controller ok healthz check failed ** /stderr ** helpers_test.go:232: status error: exit status 2 (may be ok) helpers_test.go:237: <<< TestErrorSpam FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestErrorSpam]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p nospam-20200724220008-14997 logs -n 25 2020/07/24 22:02:01 [DEBUG] GET http://127.0.0.1:35649/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ === CONT TestFunctional/parallel/DashboardCmd functional_test.go:390: (dbg) stopping [./minikube-linux-amd64 dashboard --url -p functional-20200724215019-14997 --alsologtostderr -v=1] ... helpers_test.go:446: unable to kill pid 109321: os: process already finished === CONT TestNetworkPlugins/group/false === RUN TestNetworkPlugins/group/false/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p false-20200724220201-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=false --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestCertOptions helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p cert-options-20200724220008-14997: (12.872639131s) --- PASS: TestCertOptions (117.38s) === CONT TestNetworkPlugins/group/cilium === RUN TestNetworkPlugins/group/cilium/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p cilium-20200724220205-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=cilium --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestErrorSpam helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p nospam-20200724220008-14997 logs -n 25: (8.952294649s) helpers_test.go:245: TestErrorSpam logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:00:59 UTC, end at Fri 2020-07-24 22:02:01 UTC. 
-- * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.275073961Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.275156166Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc00083d9c0, CONNECTING" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.275202869Z" level=info msg="blockingPicker: the picked transport is not ready, loop back to repick" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.276119729Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc00083d9c0, READY" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.279196427Z" level=info msg="parsed scheme: \"unix\"" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.279227629Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.279250331Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.279265832Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.279333936Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc00083df40, CONNECTING" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.280727926Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc00083df40, READY" module=grpc * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.286818620Z" level=info msg="[graphdriver] using prior storage driver: overlay2" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.326400976Z" level=warning msg="Your kernel does not support swap memory limit" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.326428977Z" level=warning msg="Your kernel does not support cgroup rt period" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.326436578Z" level=warning msg="Your kernel does not support cgroup rt runtime" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.326443078Z" level=warning msg="Your kernel does not support cgroup blkio weight" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.326449379Z" level=warning msg="Your kernel does not support cgroup blkio weight_device" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.326610889Z" level=info msg="Loading containers: start." * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.485615657Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.18.0.0/16. Daemon option --bip can be used to set a preferred IP address" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.554041376Z" level=info msg="Loading containers: done." 
* Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.605047970Z" level=warning msg="Not using native diff for overlay2, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" storage-driver=overlay2 * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.605355590Z" level=info msg="Docker daemon" commit=6a30dfca03 graphdriver(s)=overlay2 version=19.03.2 * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.605427695Z" level=info msg="Daemon has completed initialization" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.633280893Z" level=info msg="API listen on [::]:2376" * Jul 24 22:01:13 nospam-20200724220008-14997 dockerd[361]: time="2020-07-24T22:01:13.633278993Z" level=info msg="API listen on /var/run/docker.sock" * Jul 24 22:01:13 nospam-20200724220008-14997 systemd[1]: Started Docker Application Container Engine. * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 4bcd90baf003c 303ce5db0e90d 31 seconds ago Running etcd 0 718bee8512783 * 344a69b20b84f 76216c34ed0c7 31 seconds ago Running kube-scheduler 0 9462d75f96b79 * f24aa11d1e00c da26705ccb4b5 31 seconds ago Running kube-controller-manager 0 224958b02e10f * 419ff9c5594bc 7e28efa976bd1 32 seconds ago Running kube-apiserver 0 634234300ba4e * * ==> describe nodes <== * Name: nospam-20200724220008-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=nospam-20200724220008-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=nospam-20200724220008-14997 * minikube.k8s.io/updated_at=2020_07_24T22_01_47_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:01:36 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: nospam-20200724220008-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:01:57 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:01:48 +0000 Fri, 24 Jul 2020 22:01:35 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:01:48 +0000 Fri, 24 Jul 2020 22:01:35 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:01:48 +0000 Fri, 24 Jul 2020 22:01:35 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:01:48 +0000 Fri, 24 Jul 2020 22:01:36 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.6 * Hostname: nospam-20200724220008-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: a92e7783ae8744bfb8e9cab984f6bbdd * System UUID: 73509b75-99d3-4b0d-af40-8a8efd1d5c42 * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: 
Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: docker://19.3.2 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * Non-terminated Pods: (7 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * kube-system coredns-66bff467f8-wtn7x 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 9s * kube-system etcd-nospam-20200724220008-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14s * kube-system kube-apiserver-nospam-20200724220008-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 14s * kube-system kube-controller-manager-nospam-20200724220008-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 14s * kube-system kube-proxy-dtp4h 0 (0%) 0 (0%) 0 (0%) 0 (0%) 1s * kube-system kube-scheduler-nospam-20200724220008-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 14s * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9s * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 650m (4%) 0 (0%) * memory 70Mi (0%) 170Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 38s (x5 over 40s) kubelet, nospam-20200724220008-14997 Node nospam-20200724220008-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 38s (x5 over 40s) kubelet, nospam-20200724220008-14997 Node nospam-20200724220008-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 37s (x6 over 40s) kubelet, nospam-20200724220008-14997 Node nospam-20200724220008-14997 status is now: NodeHasSufficientMemory * Normal Starting 15s kubelet, nospam-20200724220008-14997 Starting kubelet. 
* Warning SystemOOM 15s kubelet, nospam-20200724220008-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 15s kubelet, nospam-20200724220008-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 15s kubelet, nospam-20200724220008-14997 Node nospam-20200724220008-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 15s kubelet, nospam-20200724220008-14997 Node nospam-20200724220008-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 15s kubelet, nospam-20200724220008-14997 Node nospam-20200724220008-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 15s kubelet, nospam-20200724220008-14997 Updated Node Allocatable limit across pods * * ==> dmesg <== * [ +0.012252] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.004428] FS-Cache: N-cookie c=0000000081bc1685 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * * ==> etcd [4bcd90baf003] <== * 2020-07-24 22:01:48.199732 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/service-controller\" " with result "range_response_count:0 size:5" took too long (116.02141ms) to execute * 2020-07-24 22:01:48.569012 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/persistent-volume-binder\" " with result "range_response_count:0 size:5" took too long (331.227438ms) to execute * 2020-07-24 22:01:49.166226 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/replicaset-controller\" " with result "range_response_count:1 size:210" took too long (450.709612ms) to execute * 2020-07-24 22:01:49.166338 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-nospam-20200724220008-14997\" " with result "range_response_count:1 size:5488" took too long (179.805652ms) to execute * 2020-07-24 22:01:50.385294 W | etcdserver: request "header: txn: 
success:> failure:<>>" with result "size:16" took too long (973.171795ms) to execute * 2020-07-24 22:01:51.023674 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/generic-garbage-collector\" " with result "range_response_count:1 size:218" took too long (540.418325ms) to execute * 2020-07-24 22:01:54.406357 W | wal: sync duration of 1.069381958s, expected less than 1s * 2020-07-24 22:01:55.705099 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (1.999983303s) to execute * WARNING: 2020/07/24 22:01:55 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:01:57.254798 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000030102s) to execute * 2020-07-24 22:01:57.972687 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (2.000068306s) to execute * WARNING: 2020/07/24 22:01:57 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:02:00.359163 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (1.999776152s) to execute * WARNING: 2020/07/24 22:02:00 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:02:01.573369 W | etcdserver: request "header: txn: success: > failure: >>" with result "size:18" took too long (8.236229387s) to execute * 2020-07-24 22:02:01.595341 W | wal: sync duration of 7.188746753s, expected less than 1s * 2020-07-24 22:02:01.596805 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" " with result "range_response_count:0 size:5" took too long (1.25249179s) to execute * 2020-07-24 22:02:01.596898 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.284783368s) to execute * 2020-07-24 22:02:01.596925 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-controller-manager-nospam-20200724220008-14997\" " with result "range_response_count:1 size:5334" took too long (4.154109649s) to execute * 2020-07-24 22:02:01.597010 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (6.441641214s) to execute * 2020-07-24 22:02:01.597180 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-66bff467f8-wtn7x\" " with result "range_response_count:1 size:3349" took too long (4.954921432s) to execute * 2020-07-24 22:02:01.597276 W | etcdserver: read-only range request "key:\"/registry/events/\" range_end:\"/registry/events0\" limit:500 " with result "range_response_count:15 size:11148" took too long (7.878102892s) to execute * 2020-07-24 22:02:05.501658 W | wal: sync duration of 1.186641049s, expected less than 1s * 2020-07-24 22:02:05.551950 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (396.644693ms) to execute * 2020-07-24 22:02:05.551981 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (296.810748ms) to 
execute * * ==> kernel <== * 22:02:08 up 29 min, 0 users, load average: 12.71, 5.90, 3.00 * Linux nospam-20200724220008-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [419ff9c5594b] <== * Trace[1950063174]: [1.236586884s] [1.236506878s] Object stored in database * I0724 22:02:01.597428 1 trace.go:116] Trace[2078740885]: "Delete" url:/api/v1/namespaces/kube-system/endpoints/k8s.io-minikube-hostpath,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:endpoint-controller,client:172.17.0.6 (started: 2020-07-24 22:02:00.343934882 +0000 UTC m=+29.815003935) (total time: 1.253467158s): * Trace[2078740885]: [1.253467158s] [1.253405754s] END * I0724 22:02:01.597849 1 trace.go:116] Trace[567055707]: "Get" url:/api/v1/namespaces/kube-system/pods/kube-controller-manager-nospam-20200724220008-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.6 (started: 2020-07-24 22:01:57.442383328 +0000 UTC m=+26.913452481) (total time: 4.155433441s): * Trace[567055707]: [4.155212125s] [4.155203525s] About to write a response * I0724 22:02:01.597860 1 trace.go:116] Trace[1373582263]: "Update" url:/apis/certificates.k8s.io/v1beta1/certificatesigningrequests/csr-cfhzl/approval,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:certificate-controller,client:172.17.0.6 (started: 2020-07-24 22:02:00.541678232 +0000 UTC m=+30.012747385) (total time: 1.056159938s): * Trace[1373582263]: [1.056159938s] [1.056080533s] END * I0724 22:02:01.598079 1 trace.go:116] Trace[876323537]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:01:55.154879465 +0000 UTC m=+24.625948618) (total time: 6.44316752s): * Trace[876323537]: [6.443141318s] [6.443132518s] About to write a response * I0724 22:02:01.598078 1 trace.go:116] Trace[1179952512]: "Get" url:/api/v1/namespaces/kube-system/pods/coredns-66bff467f8-wtn7x,user-agent:kube-scheduler/v1.18.3 (linux/amd64) kubernetes/2e7996e/scheduler,client:172.17.0.6 (started: 2020-07-24 22:01:56.64150864 +0000 UTC m=+26.112577693) (total time: 4.956545945s): * Trace[1179952512]: [4.956496142s] [4.956489541s] About to write a response * I0724 22:02:01.598096 1 trace.go:116] Trace[1978859779]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:01:57.386022909 +0000 UTC m=+26.857091962) (total time: 4.212053378s): * Trace[1978859779]: [4.212029776s] [4.211530042s] Transaction committed * I0724 22:02:01.598176 1 trace.go:116] Trace[1252714902]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/nospam-20200724220008-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.6 (started: 2020-07-24 22:01:57.3859009 +0000 UTC m=+26.856969953) (total time: 4.212254292s): * Trace[1252714902]: [4.212215189s] [4.212126483s] Object stored in database * I0724 22:02:01.598296 1 trace.go:116] Trace[1255358232]: "List etcd3" key:/events,resourceVersion:0,limit:500,continue: (started: 2020-07-24 22:01:53.718830115 +0000 UTC m=+23.189899268) (total time: 7.879437085s): * Trace[1255358232]: [7.879437085s] [7.879437085s] END * I0724 22:02:01.598653 1 trace.go:116] Trace[1827655414]: "List" url:/apis/events.k8s.io/v1beta1/events,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) 
kubernetes/2e7996e/shared-informers,client:172.17.0.6 (started: 2020-07-24 22:01:53.718810014 +0000 UTC m=+23.189879067) (total time: 7.879816311s): * Trace[1827655414]: [7.879494989s] [7.879479188s] Listing from storage done * I0724 22:02:01.599638 1 trace.go:116] Trace[739004745]: "GuaranteedUpdate etcd3" type:*core.Node (started: 2020-07-24 22:02:00.368251873 +0000 UTC m=+29.839320926) (total time: 1.23135972s): * Trace[739004745]: [1.229139166s] [1.227365543s] Transaction committed * I0724 22:02:01.600813 1 trace.go:116] Trace[1578114790]: "Patch" url:/api/v1/nodes/nospam-20200724220008-14997,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:ttl-controller,client:172.17.0.6 (started: 2020-07-24 22:02:00.368168067 +0000 UTC m=+29.839237120) (total time: 1.232614908s): * Trace[1578114790]: [1.229318079s] [1.227809074s] About to apply patch * I0724 22:02:01.697495 1 trace.go:116] Trace[427368186]: "Create" url:/apis/apps/v1/namespaces/kube-system/controllerrevisions,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:daemon-set-controller,client:172.17.0.6 (started: 2020-07-24 22:02:00.474766579 +0000 UTC m=+29.945835632) (total time: 1.222687618s): * Trace[427368186]: [1.222687618s] [1.222589011s] END * * ==> kube-controller-manager [f24aa11d1e00] <== * I0724 22:01:53.538518 1 disruption.go:339] Sending events to api server. * I0724 22:01:53.542200 1 shared_informer.go:230] Caches are synced for ReplicationController * I0724 22:01:53.645475 1 shared_informer.go:223] Waiting for caches to sync for resource quota * I0724 22:01:53.686477 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:01:53.768606 1 shared_informer.go:230] Caches are synced for expand * I0724 22:01:53.822132 1 shared_informer.go:230] Caches are synced for attach detach * E0724 22:02:00.339159 1 certificate_controller.go:150] Sync csr-cfhzl failed with : error updating approval for csr: etcdserver: request timed out * E0724 22:02:00.340555 1 serviceaccounts_controller.go:180] kube-system failed with : etcdserver: request timed out * E0724 22:02:00.341562 1 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"nospam-20200724220008-14997.1624cf50a907a17c", GenerateName:"", Namespace:"default", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Node", Namespace:"", Name:"nospam-20200724220008-14997", UID:"9a2a5f2f-e872-4683-8396-c202ddb9dc7d", APIVersion:"v1", ResourceVersion:"", FieldPath:""}, Reason:"RegisteredNode", Message:"Node nospam-20200724220008-14997 event: Registered Node nospam-20200724220008-14997 in Controller", Source:v1.EventSource{Component:"node-controller", Host:""}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xbfbef4745409b77c, ext:22338972810, loc:(*time.Location)(0x6d09200)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xbfbef4745409b77c, ext:22338972810, loc:(*time.Location)(0x6d09200)}}, Count:1, Type:"Normal", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, 
Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'etcdserver: request timed out' (will not retry!) * W0724 22:02:00.359175 1 endpointslice_controller.go:260] Error syncing endpoint slices for service "kube-system/kube-dns", retrying. Error: Error creating EndpointSlice for Service kube-system/kube-dns: etcdserver: request timed out * I0724 22:02:00.359246 1 event.go:278] Event(v1.ObjectReference{Kind:"Service", Namespace:"kube-system", Name:"kube-dns", UID:"77d97fd9-a60e-4a34-a61b-fcee2db6f36a", APIVersion:"v1", ResourceVersion:"211", FieldPath:""}): type: 'Warning' reason: 'FailedToUpdateEndpointSlices' Error updating Endpoint Slices for Service kube-system/kube-dns: Error creating EndpointSlice for Service kube-system/kube-dns: etcdserver: request timed out * E0724 22:02:00.361298 1 ttl_controller.go:220] etcdserver: request timed out * E0724 22:02:00.363756 1 controller_utils.go:210] unable to remove [&Taint{Key:node.kubernetes.io/not-ready,Value:,Effect:NoSchedule,TimeAdded:,}] unneeded taint from unresponsive Node "nospam-20200724220008-14997": etcdserver: request timed out * E0724 22:02:00.364105 1 node_lifecycle_controller.go:605] Failed to taint NoSchedule on node , requeue it: failed to swap taints of node &Node{ObjectMeta:{nospam-20200724220008-14997 /api/v1/nodes/nospam-20200724220008-14997 9a2a5f2f-e872-4683-8396-c202ddb9dc7d 287 0 2020-07-24 22:01:36 +0000 UTC map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:nospam-20200724220008-14997 kubernetes.io/os:linux minikube.k8s.io/commit:40eac8ce825d2bb784efa63b900c8d788ea49faf minikube.k8s.io/name:nospam-20200724220008-14997 minikube.k8s.io/updated_at:2020_07_24T22_01_47_0700 minikube.k8s.io/version:v1.12.1 node-role.kubernetes.io/master:] map[kubeadm.alpha.kubernetes.io/cri-socket:/var/run/dockershim.sock volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubeadm Update v1 2020-07-24 22:01:46 +0000 UTC FieldsV1 FieldsV1{Raw:*[managed-fields JSON bytes elided],}} {kubelet Update v1 2020-07-24 22:01:48 +0000 UTC FieldsV1 &FieldsV1{Raw:*[managed-fields JSON bytes elided],}} {kubectl Update v1 2020-07-24 22:01:52 +0000 UTC FieldsV1 &FieldsV1{Raw:*[managed-fields JSON bytes elided],}}]},Spec:NodeSpec{PodCIDR:,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{Taint{Key:node.kubernetes.io/not-ready,Value:,Effect:NoSchedule,TimeAdded:,},},ConfigSource:nil,PodCIDRs:[],},Status:NodeStatus{Capacity:ResourceList{cpu: {{16 0} {} 16 DecimalSI},ephemeral-storage: {{131977494528 0} {} BinarySI},hugepages-1Gi: {{0 0} {} 0 DecimalSI},hugepages-2Mi: {{0 0} {} 0 DecimalSI},memory: {{67396653056 0} {} 65817044Ki BinarySI},pods: {{110 0} {} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{16 0} {} 16 DecimalSI},ephemeral-storage: {{131977494528 0} {} BinarySI},hugepages-1Gi: {{0 0} {} 0 DecimalSI},hugepages-2Mi: {{0 0} {} 0 DecimalSI},memory: {{67396653056 0} {} 65817044Ki BinarySI},pods: {{110 0} {} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2020-07-24 22:01:48 +0000 UTC,LastTransitionTime:2020-07-24 22:01:35 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2020-07-24 22:01:48 +0000 UTC,LastTransitionTime:2020-07-24 22:01:35 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2020-07-24 22:01:48 +0000 UTC,LastTransitionTime:2020-07-24 22:01:35 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2020-07-24 22:01:48 +0000 UTC,LastTransitionTime:2020-07-24 22:01:36 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready
status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:172.17.0.6,},NodeAddress{Type:Hostname,Address:nospam-20200724220008-14997,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:a92e7783ae8744bfb8e9cab984f6bbdd,SystemUUID:73509b75-99d3-4b0d-af40-8a8efd1d5c42,BootID:65219ec9-ab55-4151-85fa-6cbcd6144529,KernelVersion:5.4.0-1022-azure,OSImage:Ubuntu 19.10,ContainerRuntimeVersion:docker://19.3.2,KubeletVersion:v1.18.3,KubeProxyVersion:v1.18.3,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[k8s.gcr.io/etcd@sha256:4afb99b4690b418ffc2ceb67e1a17376457e441c1f09ab55447f0aaf992fa646 k8s.gcr.io/etcd:3.4.3-0],SizeBytes:288426917,},ContainerImage{Names:[kubernetesui/dashboard@sha256:a705c04e83badb4fdb2b95eb6b126f3c2759677b2f953742f3b08a1fada07d9d kubernetesui/dashboard:v2.0.1],SizeBytes:222771101,},ContainerImage{Names:[k8s.gcr.io/kube-apiserver@sha256:e1c8ce568634f79f76b6e8168c929511ad841ea7692271caf6fd3779c3545c2d k8s.gcr.io/kube-apiserver:v1.18.3],SizeBytes:172997403,},ContainerImage{Names:[k8s.gcr.io/kube-controller-manager@sha256:d62a4f41625e1631a2683cbdf1c9c9bd27f0b9c5d8d8202990236fc0d5ef1703 k8s.gcr.io/kube-controller-manager:v1.18.3],SizeBytes:162388763,},ContainerImage{Names:[k8s.gcr.io/kube-proxy@sha256:6a093c22e305039b7bd6c3f8eab8f202ad8238066ed210857b25524443aa8aff k8s.gcr.io/kube-proxy:v1.18.3],SizeBytes:117090625,},ContainerImage{Names:[k8s.gcr.io/kube-scheduler@sha256:5381cd9680bf5fb16a5c8ac60141eaab242c1c4960f1c32a21807efcca3e765b k8s.gcr.io/kube-scheduler:v1.18.3],SizeBytes:95279899,},ContainerImage{Names:[gcr.io/k8s-minikube/storage-provisioner@sha256:088daa9fcbccf04c3f415d77d5a6360d2803922190b675cb7fc88a9d2d91985a gcr.io/k8s-minikube/storage-provisioner:v1.8.1],SizeBytes:80815640,},ContainerImage{Names:[k8s.gcr.io/coredns@sha256:2c8d61c46f484d881db43b34d13ca47a269336e576c81cf007ca740fa9ec0800 k8s.gcr.io/coredns:1.6.7],SizeBytes:43794147,},ContainerImage{Names:[kubernetesui/metrics-scraper@sha256:555981a24f184420f3be0c79d4efb6c948a85cfce84034f85a563f4151a81cbf kubernetesui/metrics-scraper:v1.0.4],SizeBytes:36937728,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f k8s.gcr.io/pause:3.2],SizeBytes:682696,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} * E0724 22:02:00.468439 1 daemon_controller.go:292] kube-system/kube-proxy failed with : failed to construct revisions of DaemonSet: etcdserver: request timed out * E0724 22:02:01.599303 1 certificate_controller.go:150] Sync csr-cfhzl failed with : error updating approval for csr: Operation cannot be fulfilled on certificatesigningrequests.certificates.k8s.io "csr-cfhzl": the object has been modified; please apply your changes to the latest version and try again * I0724 22:02:01.645850 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:02:01.667907 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:02:01.667933 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * I0724 22:02:01.667990 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:02:01.696260 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:02:01.734257 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"368ff8a0-3473-4be6-a418-606d177f0419", APIVersion:"apps/v1", ResourceVersion:"216", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-dtp4h * E0724 22:02:01.828860 1 daemon_controller.go:292] kube-system/kube-proxy failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy", GenerateName:"", Namespace:"kube-system", SelfLink:"/apis/apps/v1/namespaces/kube-system/daemonsets/kube-proxy", UID:"368ff8a0-3473-4be6-a418-606d177f0419", ResourceVersion:"216", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63731224907, loc:(*time.Location)(0x6d09200)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"1"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kubeadm", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc00123b2c0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc00123b2e0)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc00123b300), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-proxy", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(0xc000b9d980), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}, v1.Volume{Name:"xtables-lock", 
VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc00123b320), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}, v1.Volume{Name:"lib-modules", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc00123b340), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kube-proxy", Image:"k8s.gcr.io/kube-proxy:v1.18.3", Command:[]string{"/usr/local/bin/kube-proxy", "--config=/var/lib/kube-proxy/config.conf", "--hostname-override=$(NODE_NAME)"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"NODE_NAME", Value:"", ValueFrom:(*v1.EnvVarSource)(0xc00123b3a0)}}, Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-proxy", ReadOnly:false, MountPath:"/var/lib/kube-proxy", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"xtables-lock", ReadOnly:false, MountPath:"/run/xtables.lock", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"lib-modules", ReadOnly:true, 
MountPath:"/lib/modules", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(0xc000f47590), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001493958), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string{"kubernetes.io/os":"linux"}, ServiceAccountName:"kube-proxy", DeprecatedServiceAccount:"kube-proxy", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000241570), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"system-node-critical", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc000d9e1a0)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc0014939a8)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "kube-proxy": the object has been modified; please apply your changes to the latest version and try again * W0724 22:02:01.829771 1 endpointslice_controller.go:260] Error syncing endpoint slices for service "kube-system/kube-dns", retrying. 
Error: Error deleting kube-dns-8p7zb EndpointSlice for Service kube-system/kube-dns: endpointslices.discovery.k8s.io "kube-dns-8p7zb" not found
* I0724 22:02:01.834492 1 event.go:278] Event(v1.ObjectReference{Kind:"Service", Namespace:"kube-system", Name:"kube-dns", UID:"77d97fd9-a60e-4a34-a61b-fcee2db6f36a", APIVersion:"v1", ResourceVersion:"211", FieldPath:""}): type: 'Warning' reason: 'FailedToUpdateEndpointSlices' Error updating Endpoint Slices for Service kube-system/kube-dns: Error deleting kube-dns-8p7zb EndpointSlice for Service kube-system/kube-dns: endpointslices.discovery.k8s.io "kube-dns-8p7zb" not found
*
* ==> kube-scheduler [344a69b20b84] <==
* E0724 22:01:36.242618 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:01:36.242667 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:01:37.139700 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:01:37.246556 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:01:37.381275 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:01:37.382452 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:01:37.425585 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* E0724 22:01:37.563453 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E0724 22:01:37.724669 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* E0724 22:01:37.750233 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E0724 22:01:37.820942 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:01:38.839882 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:01:39.414867 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:01:39.470093 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:01:39.486190 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:01:39.488046 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:01:39.493795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E0724 22:01:39.924300 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* E0724 22:01:40.317458 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E0724 22:01:40.540615 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* E0724 22:01:43.435410 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:01:44.161373 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:01:44.534828 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:01:44.644617 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* I0724 22:01:45.640931 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Fri 2020-07-24 22:00:59 UTC, end at Fri 2020-07-24 22:02:08 UTC. --
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.037739 2407 kubelet_node_status.go:73] Successfully registered node nospam-20200724220008-14997
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.038291 2407 topology_manager.go:233] [topologymanager] Topology Admit Handler
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.056355 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "usr-share-ca-certificates" (UniqueName: "kubernetes.io/host-path/a5a09bb5f811b98d4bf223cf9a6aaa89-usr-share-ca-certificates") pod "kube-apiserver-nospam-20200724220008-14997" (UID: "a5a09bb5f811b98d4bf223cf9a6aaa89")
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.056454 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "ca-certs" (UniqueName: "kubernetes.io/host-path/a5a09bb5f811b98d4bf223cf9a6aaa89-ca-certs") pod "kube-apiserver-nospam-20200724220008-14997" (UID: "a5a09bb5f811b98d4bf223cf9a6aaa89")
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.056573 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "etc-ca-certificates" (UniqueName: "kubernetes.io/host-path/a5a09bb5f811b98d4bf223cf9a6aaa89-etc-ca-certificates") pod "kube-apiserver-nospam-20200724220008-14997" (UID: "a5a09bb5f811b98d4bf223cf9a6aaa89")
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.056614 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "k8s-certs" (UniqueName: "kubernetes.io/host-path/a5a09bb5f811b98d4bf223cf9a6aaa89-k8s-certs") pod "kube-apiserver-nospam-20200724220008-14997" (UID: "a5a09bb5f811b98d4bf223cf9a6aaa89")
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.056647 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "usr-local-share-ca-certificates" (UniqueName: "kubernetes.io/host-path/a5a09bb5f811b98d4bf223cf9a6aaa89-usr-local-share-ca-certificates") pod "kube-apiserver-nospam-20200724220008-14997" (UID: "a5a09bb5f811b98d4bf223cf9a6aaa89")
* Jul 24 22:01:48 nospam-20200724220008-14997 kubelet[2407]: I0724 22:01:48.257245 2407 reconciler.go:157] Reconciler: start to sync state
* Jul 24 22:01:49 nospam-20200724220008-14997 kubelet[2407]: E0724 22:01:49.168643 2407 kubelet.go:1663] Failed creating a mirror pod for "etcd-nospam-20200724220008-14997_kube-system(34a45a9ce63552328606b4b68eb08f58)": pods "etcd-nospam-20200724220008-14997" already exists
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.698581 2407 topology_manager.go:233] [topologymanager] Topology Admit Handler
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.706907 2407 topology_manager.go:233] [topologymanager] Topology Admit Handler
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.765105 2407 topology_manager.go:233] [topologymanager] Topology Admit Handler
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.857242 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "storage-provisioner-token-qmmw9" (UniqueName: "kubernetes.io/secret/911941de-c2a1-4e8b-8fee-e62436260e57-storage-provisioner-token-qmmw9") pod "storage-provisioner" (UID: "911941de-c2a1-4e8b-8fee-e62436260e57")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.857302 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "config-volume" (UniqueName: "kubernetes.io/configmap/5c7ebbe6-f29b-4a7b-8440-386446a1fc32-config-volume") pod "coredns-66bff467f8-wtn7x" (UID: "5c7ebbe6-f29b-4a7b-8440-386446a1fc32")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.857338 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "coredns-token-jjttk" (UniqueName: "kubernetes.io/secret/5c7ebbe6-f29b-4a7b-8440-386446a1fc32-coredns-token-jjttk") pod "coredns-66bff467f8-wtn7x" (UID: "5c7ebbe6-f29b-4a7b-8440-386446a1fc32")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.857377 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "tmp" (UniqueName: "kubernetes.io/host-path/911941de-c2a1-4e8b-8fee-e62436260e57-tmp") pod "storage-provisioner" (UID: "911941de-c2a1-4e8b-8fee-e62436260e57")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.957621 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "xtables-lock" (UniqueName: "kubernetes.io/host-path/e77bc831-b0cc-4d60-b213-9bf6d6e40251-xtables-lock") pod "kube-proxy-dtp4h" (UID: "e77bc831-b0cc-4d60-b213-9bf6d6e40251")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.957716 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-proxy-token-4dz2j" (UniqueName: "kubernetes.io/secret/e77bc831-b0cc-4d60-b213-9bf6d6e40251-kube-proxy-token-4dz2j") pod "kube-proxy-dtp4h" (UID: "e77bc831-b0cc-4d60-b213-9bf6d6e40251")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.957826 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-proxy" (UniqueName: "kubernetes.io/configmap/e77bc831-b0cc-4d60-b213-9bf6d6e40251-kube-proxy") pod "kube-proxy-dtp4h" (UID: "e77bc831-b0cc-4d60-b213-9bf6d6e40251")
* Jul 24 22:02:01 nospam-20200724220008-14997 kubelet[2407]: I0724 22:02:01.958038 2407 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "lib-modules" (UniqueName: "kubernetes.io/host-path/e77bc831-b0cc-4d60-b213-9bf6d6e40251-lib-modules") pod "kube-proxy-dtp4h" (UID: "e77bc831-b0cc-4d60-b213-9bf6d6e40251")
* Jul 24 22:02:07 nospam-20200724220008-14997 kubelet[2407]: W0724 22:02:07.737067 2407 pod_container_deletor.go:77] Container "b1bb3a0df2008a5dc21921d4a4a2832c40ccfb0d9d3971127f41eeff6cb9fb3e" not found in pod's containers
* Jul 24 22:02:07 nospam-20200724220008-14997 kubelet[2407]: W0724 22:02:07.738470 2407 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for kube-system/coredns-66bff467f8-wtn7x through plugin: invalid network status for
* Jul 24 22:02:07 nospam-20200724220008-14997 kubelet[2407]: W0724 22:02:07.765890 2407 pod_container_deletor.go:77] Container "67983f44675d1bbced50a3a49b73fbfbe3cf4f8b8e8150dc0bf517af1907f99c" not found in pod's containers
* Jul 24 22:02:07 nospam-20200724220008-14997 kubelet[2407]: W0724 22:02:07.939242 2407 pod_container_deletor.go:77] Container "1ec115eddb40c0f0e7c401fff8791c0c224dcd25f2eee2925fd331e6c4026c38" not found in pod's containers
* Jul 24 22:02:08 nospam-20200724220008-14997 kubelet[2407]: W0724 22:02:08.958454 2407 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for kube-system/coredns-66bff467f8-wtn7x through plugin: invalid network status for
-- /stdout --
** stderr **
E0724 22:02:09.080029 113177 style.go:178] unable to parse "* E0724 22:02:00.364105 1 node_lifecycle_controller.go:605] Failed to taint NoSchedule on node , requeue it: failed to swap taints of node &Node{ObjectMeta:{nospam-20200724220008-14997 [... full Node dump elided; it is byte-for-byte the node_lifecycle_controller.go:605 entry already shown in the kube-controller-manager log above ...],},}\n": template: * E0724 22:02:00.364105 1 node_lifecycle_controller.go:605] Failed to taint NoSchedule on node , requeue it: failed to swap taints of node &Node{ObjectMeta:{nospam-20200724220008-14997 [... the template error then echoes the same Node dump a second time, elided ...],},} :1: unexpected "}" in operand - returning raw string.
** /stderr **
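The `unable to parse ... unexpected "}" in operand` stderr above is worth decoding, since unexpected output like this is exactly what TestErrorSpam asserts against: minikube's style.go feeds each log line through Go's text/template, and the Node dump contains resource.Quantity values that print as `{{16 0} {} 16 DecimalSI}`, so the leading `{{` opens a template action the parser cannot finish. A minimal standalone reproduction, standard library only (the real style.go additionally logs the error and falls back to the raw string, which is the "- returning raw string" suffix):

    package main

    import (
    	"fmt"
    	"text/template"
    )

    func main() {
    	// A resource.Quantity prints as {{16 0} {} 16 DecimalSI}; fed back through
    	// text/template, the leading "{{" opens an action and the stray "}" inside
    	// it is rejected by the parser.
    	line := `cpu: {{16 0} {} 16 DecimalSI}`
    	if _, err := template.New("").Parse(line); err != nil {
    		fmt.Println(err) // template: :1: unexpected "}" in operand
    	}
    }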
** /stderr ** helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p nospam-20200724220008-14997 -n nospam-20200724220008-14997 helpers_test.go:254: (dbg) Run: kubectl --context nospam-20200724220008-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:254: (dbg) Done: kubectl --context nospam-20200724220008-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running: (4.177127745s) helpers_test.go:260: non-running pods: helpers_test.go:262: ======> post-mortem[TestErrorSpam]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context nospam-20200724220008-14997 describe pod helpers_test.go:265: (dbg) Non-zero exit: kubectl --context nospam-20200724220008-14997 describe pod : exit status 1 (75.535409ms) ** stderr ** error: resource name may not be empty ** /stderr ** helpers_test.go:267: kubectl --context nospam-20200724220008-14997 describe pod : exit status 1 helpers_test.go:170: Cleaning up "nospam-20200724220008-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p nospam-20200724220008-14997 === CONT TestKubernetesUpgrade version_upgrade_test.go:163: (dbg) Done: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.13.0 --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m16.653688436s) version_upgrade_test.go:168: (dbg) Run: ./minikube-linux-amd64 stop -p kubernetes-upgrade-20200724220008-14997 === CONT TestErrorSpam helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p nospam-20200724220008-14997: (12.888465187s) --- FAIL: TestErrorSpam (138.72s) === CONT TestNetworkPlugins/group/calico === RUN TestNetworkPlugins/group/calico/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p calico-20200724220226-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=calico --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestPause/serial/SecondStartNoReconfiguration pause_test.go:78: (dbg) Done: ./minikube-linux-amd64 start -p pause-20200724220008-14997 --alsologtostderr -v=1: (46.103619698s) === RUN TestPause/serial/Pause pause_test.go:95: (dbg) Run: ./minikube-linux-amd64 pause -p pause-20200724220008-14997 --alsologtostderr -v=5 === RUN TestPause/serial/Unpause pause_test.go:105: (dbg) Run: ./minikube-linux-amd64 unpause -p pause-20200724220008-14997 --alsologtostderr -v=5 === RUN TestPause/serial/PauseAgain pause_test.go:95: (dbg) Run: ./minikube-linux-amd64 pause -p pause-20200724220008-14997 --alsologtostderr -v=5 === CONT TestForceSystemdFlag docker_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p force-systemd-flag-20200724220008-14997 --memory=1800 --force-systemd --alsologtostderr -v=5 --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m26.636603493s) docker_test.go:85: (dbg) Run: ./minikube-linux-amd64 -p force-systemd-flag-20200724220008-14997 ssh "docker info --format {{.CgroupDriver}}" === RUN TestPause/serial/DeletePaused pause_test.go:115: (dbg) Run: ./minikube-linux-amd64 delete -p pause-20200724220008-14997 --alsologtostderr -v=5 === CONT TestForceSystemdFlag docker_test.go:85: (dbg) Done: ./minikube-linux-amd64 -p force-systemd-flag-20200724220008-14997 ssh "docker info --format {{.CgroupDriver}}": (5.091597077s) helpers_test.go:170: Cleaning up "force-systemd-flag-20200724220008-14997" profile ... 
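Note on the node dump above: the long runs of decimal numbers are the managedFields FieldsV1 `Raw` values, which are JSON documents that Go's fmt package prints byte by byte (the runs open with 123 34 ..., i.e. `{"`). A minimal Go sketch for recovering the readable JSON, using a short hard-coded excerpt for illustration:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Short excerpt of a FieldsV1 Raw value from the dump, hard-coded for
        // illustration: these bytes spell {"f:metadata":{}}.
        raw := []byte{123, 34, 102, 58, 109, 101, 116, 97, 100, 97, 116, 97, 34, 58, 123, 125, 125}
        var v map[string]interface{}
        if err := json.Unmarshal(raw, &v); err != nil {
            fmt.Println("not valid JSON:", err)
            return
        }
        pretty, _ := json.MarshalIndent(v, "", "  ")
        fmt.Println(string(pretty))
    }

Applied to the full arrays, this recovers the per-manager field-ownership maps, e.g. `{"f:metadata":{"f:labels":{"f:minikube.k8s.io/commit":{},...}}}` for the kubectl update.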
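The post-mortem helper above shells out to kubectl with `--field-selector=status.phase!=Running` to find non-running pods. A sketch of the same query through client-go (assumes client-go is available; the kubeconfig path and error handling are illustrative, not the test's actual code):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Load the default kubeconfig; the tests point KUBECONFIG at a test home.
        config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(config)
        if err != nil {
            panic(err)
        }
        // Same filter the helper passes to kubectl: pods in any namespace whose
        // phase is not Running.
        pods, err := cs.CoreV1().Pods("").List(context.Background(),
            metav1.ListOptions{FieldSelector: "status.phase!=Running"})
        if err != nil {
            panic(err)
        }
        for _, p := range pods.Items {
            fmt.Println(p.Namespace, p.Name, p.Status.Phase)
        }
    }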
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p force-systemd-flag-20200724220008-14997 === CONT TestKubernetesUpgrade version_upgrade_test.go:168: (dbg) Done: ./minikube-linux-amd64 stop -p kubernetes-upgrade-20200724220008-14997: (20.715354875s) version_upgrade_test.go:173: (dbg) Run: ./minikube-linux-amd64 -p kubernetes-upgrade-20200724220008-14997 status --format={{.Host}} version_upgrade_test.go:173: (dbg) Non-zero exit: ./minikube-linux-amd64 -p kubernetes-upgrade-20200724220008-14997 status --format={{.Host}}: exit status 7 (110.827299ms) -- stdout -- Stopped -- /stdout -- version_upgrade_test.go:175: status error: exit status 7 (may be ok) version_upgrade_test.go:184: (dbg) Run: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.18.4-rc.0 --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestForceSystemdFlag helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p force-systemd-flag-20200724220008-14997: (8.405789034s) --- PASS: TestForceSystemdFlag (160.13s) === CONT TestNetworkPlugins/group/custom-weave === RUN TestNetworkPlugins/group/custom-weave/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p custom-weave-20200724220248-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=testdata/weavenet.yaml --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestPause/serial/DeletePaused pause_test.go:115: (dbg) Done: ./minikube-linux-amd64 delete -p pause-20200724220008-14997 --alsologtostderr -v=5: (17.863434245s) === RUN TestPause/serial/VerifyDeletedResources pause_test.go:125: (dbg) Run: ./minikube-linux-amd64 profile list --output json pause_test.go:151: (dbg) Run: docker ps -a pause_test.go:156: (dbg) Run: docker volume inspect pause-20200724220008-14997 pause_test.go:156: (dbg) Non-zero exit: docker volume inspect pause-20200724220008-14997: exit status 1 (53.117658ms) -- stdout -- [] -- /stdout -- ** stderr ** Error: No such volume: pause-20200724220008-14997 ** /stderr ** === CONT TestPause helpers_test.go:170: Cleaning up "pause-20200724220008-14997" profile ... 
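The `status --format={{.Host}}` call above exits non-zero for a stopped cluster, and the test explicitly treats exit status 7 as acceptable ("may be ok"). A sketch of reading that exit code from Go with os/exec (binary path and profile name taken from the log):

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        // Profile and binary names are taken from the log lines above.
        cmd := exec.Command("./minikube-linux-amd64", "status",
            "--format={{.Host}}", "-p", "kubernetes-upgrade-20200724220008-14997")
        out, err := cmd.CombinedOutput()
        if ee, ok := err.(*exec.ExitError); ok {
            // Exit status 7 with output "Stopped" is what the test tolerates.
            fmt.Printf("host state %q, exit code %d (may be ok)\n", string(out), ee.ExitCode())
            return
        }
        if err != nil {
            panic(err)
        }
        fmt.Printf("host state %q\n", string(out))
    }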
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p pause-20200724220008-14997
--- PASS: TestPause (165.53s)
--- PASS: TestPause/serial (165.11s)
--- PASS: TestPause/serial/Start (98.15s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (46.11s)
--- PASS: TestPause/serial/Pause (0.84s)
--- PASS: TestPause/serial/Unpause (0.75s)
--- PASS: TestPause/serial/PauseAgain (0.85s)
--- PASS: TestPause/serial/DeletePaused (17.86s)
--- PASS: TestPause/serial/VerifyDeletedResources (0.54s)
=== CONT TestNetworkPlugins/group/enable-default-cni
=== RUN TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p enable-default-cni-20200724220253-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --enable-default-cni=true --vm-driver=docker --base-image=local/kicbase:-snapshot
=== CONT TestDockerFlags
docker_test.go:41: (dbg) Done: ./minikube-linux-amd64 start -p docker-flags-20200724220012-14997 --cache-images=false --memory=1800 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m44.943069694s)
docker_test.go:46: (dbg) Run: ./minikube-linux-amd64 -p docker-flags-20200724220012-14997 ssh "sudo systemctl show docker --property=Environment --no-pager"
docker_test.go:57: (dbg) Run: ./minikube-linux-amd64 -p docker-flags-20200724220012-14997 ssh "sudo systemctl show docker --property=ExecStart --no-pager"
helpers_test.go:170: Cleaning up "docker-flags-20200724220012-14997" profile ...
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p docker-flags-20200724220012-14997
helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p docker-flags-20200724220012-14997: (13.737712839s)
--- PASS: TestDockerFlags (179.61s)
=== CONT TestNetworkPlugins/group/kindnet
=== RUN TestNetworkPlugins/group/kindnet/Start
net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p kindnet-20200724220311-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=kindnet --vm-driver=docker --base-image=local/kicbase:-snapshot
=== CONT TestForceSystemdEnv
docker_test.go:108: (dbg) Done: ./minikube-linux-amd64 start -p force-systemd-env-20200724220012-14997 --memory=1800 --alsologtostderr -v=5 --vm-driver=docker --base-image=local/kicbase:-snapshot: (3m18.392514447s)
docker_test.go:113: (dbg) Run: ./minikube-linux-amd64 -p force-systemd-env-20200724220012-14997 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:170: Cleaning up "force-systemd-env-20200724220012-14997" profile ...
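TestDockerFlags above passes `--docker-env=FOO=BAR --docker-env=BAZ=BAT` to minikube start and then checks that they surface in the Docker unit's systemd Environment property. A sketch of parsing that property line (the sample value is assumed from the flags above; real systemd output may quote values that contain spaces):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Sample property line assumed from the --docker-env flags above.
        out := "Environment=FOO=BAR BAZ=BAT"
        for _, pair := range strings.Fields(strings.TrimPrefix(out, "Environment=")) {
            kv := strings.SplitN(pair, "=", 2)
            fmt.Printf("%s => %s\n", kv[0], kv[1])
        }
    }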
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p force-systemd-env-20200724220012-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p force-systemd-env-20200724220012-14997: (12.686330309s) --- PASS: TestForceSystemdEnv (211.58s) === CONT TestNetworkPlugins/group/kubenet === RUN TestNetworkPlugins/group/kubenet/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p kubenet-20200724220343-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --network-plugin=kubenet --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestNetworkPlugins/group/auto/Start net_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p auto-20200724220146-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m5.009038198s) === RUN TestNetworkPlugins/group/auto/KubeletFlags net_test.go:102: (dbg) Run: ./minikube-linux-amd64 ssh -p auto-20200724220146-14997 "pgrep -a kubelet" === RUN TestNetworkPlugins/group/auto/NetCatPod net_test.go:125: (dbg) Run: kubectl --context auto-20200724220146-14997 replace --force -f testdata/netcat-deployment.yaml net_test.go:125: (dbg) Done: kubectl --context auto-20200724220146-14997 replace --force -f testdata/netcat-deployment.yaml: (1.042123109s) net_test.go:139: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... helpers_test.go:332: "netcat-7987c4c66b-7p7gg" [4e0fd294-9f63-4ee5-ba5a-c966303840a3] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) helpers_test.go:317: TestNetworkPlugins/group/auto/NetCatPod: WARNING: pod list for "default" "app=netcat" returned: etcdserver: request timed out helpers_test.go:332: "netcat-7987c4c66b-7p7gg" [4e0fd294-9f63-4ee5-ba5a-c966303840a3] Running === CONT TestNetworkPlugins/group/false/Start net_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p false-20200724220201-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=false --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m20.752720769s) === RUN TestNetworkPlugins/group/false/KubeletFlags net_test.go:102: (dbg) Run: ./minikube-linux-amd64 ssh -p false-20200724220201-14997 "pgrep -a kubelet" === RUN TestNetworkPlugins/group/false/NetCatPod net_test.go:125: (dbg) Run: kubectl --context false-20200724220201-14997 replace --force -f testdata/netcat-deployment.yaml net_test.go:139: (dbg) TestNetworkPlugins/group/false/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
helpers_test.go:332: "netcat-7987c4c66b-42pt9" [12d68feb-b892-4d1c-b7b2-23f18041c60c] Pending helpers_test.go:332: "netcat-7987c4c66b-42pt9" [12d68feb-b892-4d1c-b7b2-23f18041c60c] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) === CONT TestNetworkPlugins/group/auto/NetCatPod net_test.go:139: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 33.004362736s === RUN TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.223849666s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/false/NetCatPod helpers_test.go:332: "netcat-7987c4c66b-42pt9" [12d68feb-b892-4d1c-b7b2-23f18041c60c] Running === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/false/NetCatPod net_test.go:139: (dbg) TestNetworkPlugins/group/false/NetCatPod: app=netcat healthy within 23.007179749s === RUN TestNetworkPlugins/group/false/DNS net_test.go:156: (dbg) Run: kubectl --context false-20200724220201-14997 exec deployment/netcat -- nslookup kubernetes.default === RUN TestNetworkPlugins/group/false/Localhost net_test.go:175: (dbg) Run: kubectl --context false-20200724220201-14997 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" === RUN TestNetworkPlugins/group/false/HairPin net_test.go:188: (dbg) Run: kubectl --context false-20200724220201-14997 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" net_test.go:188: (dbg) Non-zero exit: kubectl --context false-20200724220201-14997 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080": exit status 1 (5.281366239s) ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/false net_test.go:204: "false" test finished in 4m43.993965229s, failed=false helpers_test.go:170: Cleaning up "false-20200724220201-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p false-20200724220201-14997 === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.219005196s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/false helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p false-20200724220201-14997: (11.826502566s) === CONT TestNetworkPlugins/group/bridge === RUN TestNetworkPlugins/group/bridge/Start net_test.go:80: (dbg) Run: ./minikube-linux-amd64 start -p bridge-20200724220503-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=bridge --vm-driver=docker --base-image=local/kicbase:-snapshot === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.319318917s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.217809112s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/Start net_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p enable-default-cni-20200724220253-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --enable-default-cni=true --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m52.996457589s) === RUN TestNetworkPlugins/group/enable-default-cni/KubeletFlags net_test.go:102: (dbg) Run: ./minikube-linux-amd64 ssh -p enable-default-cni-20200724220253-14997 "pgrep -a kubelet" === RUN TestNetworkPlugins/group/enable-default-cni/NetCatPod net_test.go:125: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 replace --force -f testdata/netcat-deployment.yaml net_test.go:125: (dbg) Done: kubectl --context enable-default-cni-20200724220253-14997 replace --force -f testdata/netcat-deployment.yaml: (1.893018857s) net_test.go:139: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
helpers_test.go:332: "netcat-7987c4c66b-wk7g2" [8b1e9e6c-1dc2-45e3-adf6-d86095a052b7] Pending helpers_test.go:332: "netcat-7987c4c66b-wk7g2" [8b1e9e6c-1dc2-45e3-adf6-d86095a052b7] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) === CONT TestNetworkPlugins/group/cilium/Start net_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p cilium-20200724220205-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=cilium --vm-driver=docker --base-image=local/kicbase:-snapshot: (3m48.70796554s) === RUN TestNetworkPlugins/group/cilium/ControllerPod net_test.go:88: (dbg) TestNetworkPlugins/group/cilium/ControllerPod: waiting 10m0s for pods matching "k8s-app=cilium" in namespace "kube-system" ... helpers_test.go:332: "cilium-smhv2" [3897a552-916f-42f7-95fe-e00d95049107] Running === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (20.687533854s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/NetCatPod helpers_test.go:332: "netcat-7987c4c66b-wk7g2" [8b1e9e6c-1dc2-45e3-adf6-d86095a052b7] Running === CONT TestNetworkPlugins/group/cilium/ControllerPod net_test.go:88: (dbg) TestNetworkPlugins/group/cilium/ControllerPod: k8s-app=cilium healthy within 5.023110988s === RUN TestNetworkPlugins/group/cilium/KubeletFlags net_test.go:102: (dbg) Run: ./minikube-linux-amd64 ssh -p cilium-20200724220205-14997 "pgrep -a kubelet" === RUN TestNetworkPlugins/group/cilium/NetCatPod net_test.go:125: (dbg) Run: kubectl --context cilium-20200724220205-14997 replace --force -f testdata/netcat-deployment.yaml === CONT TestNetworkPlugins/group/enable-default-cni/NetCatPod net_test.go:139: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 12.007099718s === RUN TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/cilium/NetCatPod net_test.go:125: (dbg) Done: kubectl --context cilium-20200724220205-14997 replace --force -f testdata/netcat-deployment.yaml: (1.455272601s) net_test.go:139: (dbg) TestNetworkPlugins/group/cilium/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
helpers_test.go:332: "netcat-7987c4c66b-mfnf5" [be775d73-0fea-4521-aeeb-bce54eb1b7be] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/cilium/NetCatPod helpers_test.go:332: "netcat-7987c4c66b-mfnf5" [be775d73-0fea-4521-aeeb-bce54eb1b7be] Running === CONT TestNetworkPlugins/group/kubenet/Start net_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p kubenet-20200724220343-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --network-plugin=kubenet --vm-driver=docker --base-image=local/kicbase:-snapshot: (2m27.325472532s) === RUN TestNetworkPlugins/group/kubenet/KubeletFlags net_test.go:102: (dbg) Run: ./minikube-linux-amd64 ssh -p kubenet-20200724220343-14997 "pgrep -a kubelet" === RUN TestNetworkPlugins/group/kubenet/NetCatPod net_test.go:125: (dbg) Run: kubectl --context kubenet-20200724220343-14997 replace --force -f testdata/netcat-deployment.yaml net_test.go:139: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... helpers_test.go:332: "netcat-7987c4c66b-mgq44" [48f5455c-a32c-4b0f-95d8-fa35c2728481] Pending helpers_test.go:332: "netcat-7987c4c66b-mgq44" [48f5455c-a32c-4b0f-95d8-fa35c2728481] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) === CONT TestNetworkPlugins/group/cilium/NetCatPod net_test.go:139: (dbg) TestNetworkPlugins/group/cilium/NetCatPod: app=netcat healthy within 12.025775784s === RUN TestNetworkPlugins/group/cilium/DNS net_test.go:156: (dbg) Run: kubectl --context cilium-20200724220205-14997 exec deployment/netcat -- nslookup kubernetes.default === RUN TestNetworkPlugins/group/cilium/Localhost net_test.go:175: (dbg) Run: kubectl --context cilium-20200724220205-14997 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" === RUN TestNetworkPlugins/group/cilium/HairPin net_test.go:188: (dbg) Run: kubectl --context cilium-20200724220205-14997 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" === CONT TestNetworkPlugins/group/cilium net_test.go:204: "cilium" test finished in 6m5.714437392s, failed=false helpers_test.go:170: Cleaning up "cilium-20200724220205-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p cilium-20200724220205-14997 === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.295395354s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.24596462s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/NetCatPod helpers_test.go:332: "netcat-7987c4c66b-mgq44" [48f5455c-a32c-4b0f-95d8-fa35c2728481] Running === CONT TestNetworkPlugins/group/cilium helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p cilium-20200724220205-14997: (6.03443661s) === CONT TestStartStop/group/old-k8s-version === RUN TestStartStop/group/old-k8s-version/serial === RUN TestStartStop/group/old-k8s-version/serial/FirstStart start_stop_delete_test.go:149: (dbg) Run: ./minikube-linux-amd64 start -p old-k8s-version-20200724220619-14997 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.13.0 === CONT TestNetworkPlugins/group/kubenet/NetCatPod net_test.go:139: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: app=netcat healthy within 11.576524516s === RUN TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.722220378s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/Start net_test.go:80: (dbg) Done: ./minikube-linux-amd64 start -p bridge-20200724220503-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=bridge --vm-driver=docker --base-image=local/kicbase:-snapshot: (1m32.783694773s) === RUN TestNetworkPlugins/group/bridge/KubeletFlags net_test.go:102: (dbg) Run: ./minikube-linux-amd64 ssh -p bridge-20200724220503-14997 "pgrep -a kubelet" === RUN TestNetworkPlugins/group/bridge/NetCatPod net_test.go:125: (dbg) Run: kubectl --context bridge-20200724220503-14997 replace --force -f testdata/netcat-deployment.yaml === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) 
Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.224002295s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/bridge/NetCatPod net_test.go:125: (dbg) Done: kubectl --context bridge-20200724220503-14997 replace --force -f testdata/netcat-deployment.yaml: (1.318808405s) net_test.go:139: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... helpers_test.go:332: "netcat-7987c4c66b-kczvb" [361c3580-c4e5-445a-8256-d5d5b9444080] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.207171325s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/bridge/NetCatPod helpers_test.go:332: "netcat-7987c4c66b-kczvb" [361c3580-c4e5-445a-8256-d5d5b9444080] Running net_test.go:139: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 11.007941278s === RUN TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (17.96601547s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.203897719s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.237450004s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT 
TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.256305386s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.253447866s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.242318797s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.210406303s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.233769351s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.227895451s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.220803769s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- 
nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.209534139s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.222391457s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.228412623s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.20753952s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.240934393s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.226128468s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.227654619s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec 
deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.202000592s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.227771898s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.21942709s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestKubernetesUpgrade version_upgrade_test.go:184: (dbg) Done: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.18.4-rc.0 --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot: (5m49.912562153s) version_upgrade_test.go:189: (dbg) Run: kubectl --context kubernetes-upgrade-20200724220008-14997 version --output=json version_upgrade_test.go:208: Attempting to downgrade Kubernetes (should fail) version_upgrade_test.go:210: (dbg) Run: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.13.0 --vm-driver=docker --base-image=local/kicbase:-snapshot version_upgrade_test.go:210: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.13.0 --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 78 (103.154366ms) -- stdout -- * [kubernetes-upgrade-20200724220008-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome ! 
You have selected Kubernetes 1.13.0, but the existing cluster is running Kubernetes 1.18.4-rc.0
-- /stdout --
** stderr **
X Non-destructive downgrades are not supported, but you can proceed with one of the following options:
1) Recreate the cluster with Kubernetes 1.13.0, by running:
minikube delete -p kubernetes-upgrade-20200724220008-14997
minikube start -p kubernetes-upgrade-20200724220008-14997 --kubernetes-version=v1.13.0
2) Create a second cluster with Kubernetes 1.13.0, by running:
minikube start -p kubernetes-upgrade-20200724220008-149972 --kubernetes-version=v1.13.0
3) Use the existing cluster at version Kubernetes 1.18.4-rc.0, by running:
minikube start -p kubernetes-upgrade-20200724220008-14997 --kubernetes-version=v1.18.4-rc.0
** /stderr **
version_upgrade_test.go:214: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:216: (dbg) Run: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.18.4-rc.0 --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot
=== CONT TestNetworkPlugins/group/bridge/DNS
net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default
=== CONT TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.210290464s)
-- stdout --
;; connection timed out; no servers could be reached
-- /stdout --
** stderr **
command terminated with exit code 1
** /stderr **
=== CONT TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:149: (dbg) Done: ./minikube-linux-amd64 start -p old-k8s-version-20200724220619-14997 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.13.0: (2m22.481577888s)
=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:158: (dbg) Run: kubectl --context old-k8s-version-20200724220619-14997 create -f testdata/busybox.yaml
start_stop_delete_test.go:158: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:332: "busybox" [3cca9503-cdfa-11ea-b441-024258e8c4d8] Pending helpers_test.go:332: "busybox" [3cca9503-cdfa-11ea-b441-024258e8c4d8] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestStartStop/group/old-k8s-version/serial/DeployApp helpers_test.go:332: "busybox" [3cca9503-cdfa-11ea-b441-024258e8c4d8] Running === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.232154722s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestStartStop/group/old-k8s-version/serial/DeployApp start_stop_delete_test.go:158: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 10.015736532s start_stop_delete_test.go:158: (dbg) Run: kubectl --context old-k8s-version-20200724220619-14997 exec busybox -- /bin/sh -c "ulimit -n" === RUN TestStartStop/group/old-k8s-version/serial/Stop start_stop_delete_test.go:164: (dbg) Run: ./minikube-linux-amd64 stop -p old-k8s-version-20200724220619-14997 --alsologtostderr -v=3 === CONT TestKubernetesUpgrade version_upgrade_test.go:216: (dbg) Done: ./minikube-linux-amd64 start -p kubernetes-upgrade-20200724220008-14997 --memory=2200 --kubernetes-version=v1.18.4-rc.0 --alsologtostderr -v=1 --vm-driver=docker --base-image=local/kicbase:-snapshot: (20.166029398s) helpers_test.go:170: Cleaning up "kubernetes-upgrade-20200724220008-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p kubernetes-upgrade-20200724220008-14997 === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.203391379s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestKubernetesUpgrade helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p kubernetes-upgrade-20200724220008-14997: (5.987187376s) --- PASS: TestKubernetesUpgrade (533.76s) === CONT TestStartStop/group/crio === RUN TestStartStop/group/crio/serial === RUN TestStartStop/group/crio/serial/FirstStart start_stop_delete_test.go:149: (dbg) Run: ./minikube-linux-amd64 start -p crio-20200724220901-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=crio --disable-driver-mounts --extra-config=kubeadm.ignore-preflight-errors=SystemVerification --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.15.7 === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestStartStop/group/old-k8s-version/serial/Stop start_stop_delete_test.go:164: (dbg) Done: ./minikube-linux-amd64 stop -p old-k8s-version-20200724220619-14997 --alsologtostderr -v=3: (15.927134704s) === RUN TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop start_stop_delete_test.go:174: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997 start_stop_delete_test.go:174: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997: exit status 7 (122.899846ms) -- stdout -- Stopped -- /stdout -- start_stop_delete_test.go:174: status error: exit status 7 (may be ok) start_stop_delete_test.go:181: (dbg) Run: ./minikube-linux-amd64 addons enable dashboard -p old-k8s-version-20200724220619-14997 === RUN TestStartStop/group/old-k8s-version/serial/SecondStart start_stop_delete_test.go:190: (dbg) Run: ./minikube-linux-amd64 start -p old-k8s-version-20200724220619-14997 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.13.0 === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.213895881s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT 
TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.209140078s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.284541122s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.23728471s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.494400995s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.250219498s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.193677926s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Run: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.223004035s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/auto/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 exec deployment/netcat -- nslookup kubernetes.default: 
exit status 1 (15.269387659s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:162: failed to do nslookup on kubernetes.default: exit status 1 net_test.go:167: failed nslookup: got=";; connection timed out; no servers could be reached\n\n", want=*"10.96.0.1"* === CONT TestNetworkPlugins/group/auto net_test.go:204: "auto" test finished in 10m27.340341221s, failed=true net_test.go:205: *** TestNetworkPlugins/group/auto FAILED at 2020-07-24 22:10:35.514981478 +0000 UTC m=+2065.639345963 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestNetworkPlugins/group/auto]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect auto-20200724220146-14997 helpers_test.go:228: (dbg) docker inspect auto-20200724220146-14997: -- stdout -- [ { "Id": "56d9e31ecefef72999269bcc7e01f47386096cbee130b4f186448d261a134c06", "Created": "2020-07-24T22:02:32.657372181Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 123068, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:02:33.480424316Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/56d9e31ecefef72999269bcc7e01f47386096cbee130b4f186448d261a134c06/resolv.conf", "HostnamePath": "/var/lib/docker/containers/56d9e31ecefef72999269bcc7e01f47386096cbee130b4f186448d261a134c06/hostname", "HostsPath": "/var/lib/docker/containers/56d9e31ecefef72999269bcc7e01f47386096cbee130b4f186448d261a134c06/hosts", "LogPath": "/var/lib/docker/containers/56d9e31ecefef72999269bcc7e01f47386096cbee130b4f186448d261a134c06/56d9e31ecefef72999269bcc7e01f47386096cbee130b4f186448d261a134c06-json.log", "Name": "/auto-20200724220146-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "auto-20200724220146-14997:/var", "/lib/modules:/lib/modules:ro" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, 
"CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/6a71cc9acc2db7865f14a98a801dcda31d5edc840866a40ae5f8933058689543-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/6a71cc9acc2db7865f14a98a801dcda31d5edc840866a40ae5f8933058689543/merged", "UpperDir": "/var/lib/docker/overlay2/6a71cc9acc2db7865f14a98a801dcda31d5edc840866a40ae5f8933058689543/diff", "WorkDir": "/var/lib/docker/overlay2/6a71cc9acc2db7865f14a98a801dcda31d5edc840866a40ae5f8933058689543/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "auto-20200724220146-14997", "Source": "/var/lib/docker/volumes/auto-20200724220146-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "auto-20200724220146-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, 
"ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "auto-20200724220146-14997", "name.minikube.sigs.k8s.io": "auto-20200724220146-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "3437e364753e160355fbcb13cdf2c50daa57048b4062ba29cf2cffa212bcc86c", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32844" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32843" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32842" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32841" } ] }, "SandboxKey": "/var/run/docker/netns/3437e364753e", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "97016f72d3580fb7c5d2c865e038bab09bc8debd2bb8c7297b686df8557ba55f", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.9", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:09", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "97016f72d3580fb7c5d2c865e038bab09bc8debd2bb8c7297b686df8557ba55f", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.9", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:09", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p auto-20200724220146-14997 -n auto-20200724220146-14997 helpers_test.go:237: <<< TestNetworkPlugins/group/auto FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/auto]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p auto-20200724220146-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p auto-20200724220146-14997 logs -n 25: (2.118417522s) helpers_test.go:245: TestNetworkPlugins/group/auto logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:02:34 UTC, end at Fri 2020-07-24 22:10:36 UTC. 
-- * Jul 24 22:02:47 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:47.170369674Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc * Jul 24 22:02:47 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:47.170382875Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc * Jul 24 22:02:47 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:47.170393176Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc * Jul 24 22:02:47 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:47.170427778Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc00015dbb0, CONNECTING" module=grpc * Jul 24 22:02:47 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:47.171101225Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc00015dbb0, READY" module=grpc * Jul 24 22:02:47 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:47.176602607Z" level=info msg="[graphdriver] using prior storage driver: overlay2" * Jul 24 22:02:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:48.228451979Z" level=warning msg="Your kernel does not support swap memory limit" * Jul 24 22:02:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:48.228485781Z" level=warning msg="Your kernel does not support cgroup rt period" * Jul 24 22:02:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:48.228493582Z" level=warning msg="Your kernel does not support cgroup rt runtime" * Jul 24 22:02:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:48.228500382Z" level=warning msg="Your kernel does not support cgroup blkio weight" * Jul 24 22:02:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:48.228506883Z" level=warning msg="Your kernel does not support cgroup blkio weight_device" * Jul 24 22:02:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:48.228714197Z" level=info msg="Loading containers: start." * Jul 24 22:02:50 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:50.910182078Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.18.0.0/16. Daemon option --bip can be used to set a preferred IP address" * Jul 24 22:02:52 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:52.557503841Z" level=info msg="Loading containers: done." * Jul 24 22:02:52 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:52.736150144Z" level=warning msg="Not using native diff for overlay2, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" storage-driver=overlay2 * Jul 24 22:02:52 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:52.736493068Z" level=info msg="Docker daemon" commit=6a30dfca03 graphdriver(s)=overlay2 version=19.03.2 * Jul 24 22:02:52 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:52.736557372Z" level=info msg="Daemon has completed initialization" * Jul 24 22:02:52 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:52.779979163Z" level=info msg="API listen on /var/run/docker.sock" * Jul 24 22:02:52 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:02:52.780020765Z" level=info msg="API listen on [::]:2376" * Jul 24 22:02:52 auto-20200724220146-14997 systemd[1]: Started Docker Application Container Engine. 
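[annotation] The docker inspect dump above is long, but only a few fields usually matter in this kind of post-mortem: the container State, its bridge IP (172.17.0.9 here), and the published host ports. Below is a minimal Go sketch of extracting just those fields; it is hypothetical tooling, not part of helpers_test.go, and the profile name is simply copied from the log.

    // inspect_summary.go: hypothetical helper, not minikube test code.
    // Runs `docker inspect` the way the post-mortem above does, then keeps
    // only the fields that usually matter: state, bridge IP, published ports.
    package main

    import (
        "encoding/json"
        "fmt"
        "os/exec"
    )

    // Just the slice of docker inspect's JSON we care about; field names
    // match the dump above ("State", "NetworkSettings", "Ports", ...).
    type container struct {
        State struct {
            Status    string
            Running   bool
            OOMKilled bool
        }
        NetworkSettings struct {
            IPAddress string
            Ports     map[string][]struct{ HostIp, HostPort string }
        }
    }

    func main() {
        // Profile name copied from the log; substitute your own.
        out, err := exec.Command("docker", "inspect", "auto-20200724220146-14997").Output()
        if err != nil {
            fmt.Println("docker inspect failed:", err)
            return
        }
        var cs []container
        if err := json.Unmarshal(out, &cs); err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        for _, c := range cs {
            fmt.Printf("status=%s running=%v oomKilled=%v ip=%s ports=%v\n",
                c.State.Status, c.State.Running, c.State.OOMKilled,
                c.NetworkSettings.IPAddress, c.NetworkSettings.Ports)
        }
    }

Run against this profile while it was alive, it would have confirmed the node container was Running with SSH published on 127.0.0.1:32844, i.e. the container itself was healthy and the failure lies inside the cluster.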
* Jul 24 22:03:46 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:03:46.786184877Z" level=error msg="stream copy error: reading from a closed fifo" * Jul 24 22:03:46 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:03:46.790929207Z" level=error msg="stream copy error: reading from a closed fifo" * Jul 24 22:03:46 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:03:46.857838359Z" level=warning msg="Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap." * Jul 24 22:03:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:03:48.915320275Z" level=error msg="e1dac89659df2a59f14ccad9931dbedb4c7d33cf61402e58d1b0b5077365f42e cleanup: failed to delete container from containerd: no such container" * Jul 24 22:03:48 auto-20200724220146-14997 dockerd[354]: time="2020-07-24T22:03:48.915368278Z" level=error msg="Handler for POST /containers/e1dac89659df2a59f14ccad9931dbedb4c7d33cf61402e58d1b0b5077365f42e/start returned error: OCI runtime create failed: container_linux.go:349: starting container process caused \"process_linux.go:449: container init caused \\\"process_linux.go:438: writing syncT 'resume' caused \\\\\\\"write init-p: broken pipe\\\\\\\"\\\"\": unknown" * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 9038ee5edd239 gcr.io/kubernetes-e2e-test-images/dnsutils@sha256:b31bcf7ef4420ce7108e7fc10b6c00343b21257c945eec94c21598e72a8f2de0 6 minutes ago Running dnsutils 0 3a8bb138ce50d * a234536c13375 4689081edb103 6 minutes ago Running storage-provisioner 0 bdb63d6551291 * 99a99a3fb8bcd 67da37a9a360e 6 minutes ago Running coredns 0 f43e4593c7cd9 * 872a4dcaf60b1 3439b7546f29b 6 minutes ago Running kube-proxy 0 29294a210e2a3 * 9c59d53a50d1f 303ce5db0e90d 7 minutes ago Running etcd 0 b70f1641c06ed * 21901f6ecc578 76216c34ed0c7 7 minutes ago Running kube-scheduler 0 2506c1da9fbb8 * 578a205f680d3 7e28efa976bd1 7 minutes ago Running kube-apiserver 0 b4368d17b8cc3 * 621c894d0324c da26705ccb4b5 7 minutes ago Running kube-controller-manager 0 f1885c3b07dee * * ==> coredns [99a99a3fb8bc] <== * E0724 22:10:29.780972 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:29.781688 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:29.782982 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:30.782651 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:30.783400 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:30.784503 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:31.784483 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:31.785349 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:31.786533 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:32.786337 1 reflector.go:153] 
pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:32.787263 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:32.789873 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:33.788069 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:33.789071 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:33.791294 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:34.789946 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:34.790777 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:34.792722 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:35.791647 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:35.792601 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:35.793777 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:10:36.793271 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:10:36.794268 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:10:36.795020 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * [INFO] plugin/ready: Still waiting on: "kubernetes" * * ==> describe nodes <== * Name: auto-20200724220146-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=auto-20200724220146-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=auto-20200724220146-14997 * minikube.k8s.io/updated_at=2020_07_24T22_03_29_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:03:20 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: auto-20200724220146-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:10:36 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:09:31 +0000 Fri, 24 Jul 2020 22:03:20 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:09:31 +0000 Fri, 24 Jul 2020 22:03:20 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * 
PIDPressure False Fri, 24 Jul 2020 22:09:31 +0000 Fri, 24 Jul 2020 22:03:20 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:09:31 +0000 Fri, 24 Jul 2020 22:03:40 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.9 * Hostname: auto-20200724220146-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 89fe79e0fb114d70bd5bfb482a819375 * System UUID: 1aa805a7-9e40-47b3-bd49-432c26feb949 * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: docker://19.3.2 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * Non-terminated Pods: (8 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default netcat-7987c4c66b-7p7gg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m45s * kube-system coredns-66bff467f8-jt5gp 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 6m53s * kube-system etcd-auto-20200724220146-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 7m7s * kube-system kube-apiserver-auto-20200724220146-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 7m6s * kube-system kube-controller-manager-auto-20200724220146-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 7m6s * kube-system kube-proxy-zwp2m 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m53s * kube-system kube-scheduler-auto-20200724220146-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 7m7s * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m52s * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 650m (4%) 0 (0%) * memory 70Mi (0%) 170Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 7m36s (x4 over 7m37s) kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientMemory 7m35s (x5 over 7m37s) kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeHasSufficientMemory * Normal NodeHasSufficientPID 7m35s (x5 over 7m37s) kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 7m7s kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeHasSufficientMemory * Warning SystemOOM 7m7s kubelet, auto-20200724220146-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 7m7s kubelet, auto-20200724220146-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal Starting 7m7s kubelet, auto-20200724220146-14997 Starting kubelet. 
* Normal NodeHasNoDiskPressure 7m7s kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 7m7s kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 7m7s kubelet, auto-20200724220146-14997 Updated Node Allocatable limit across pods * Normal NodeReady 6m57s kubelet, auto-20200724220146-14997 Node auto-20200724220146-14997 status is now: NodeReady * Warning readOnlySysFS 6m49s kube-proxy, auto-20200724220146-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 6m49s kube-proxy, auto-20200724220146-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
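[annotation] Among the node events above, the readOnlySysFS warning from kube-proxy ("CRI error: /sys is read-only: cannot modify conntrack limits", with a pointer to docker issue #24000) is a known docker-in-docker artifact: the kic node container gets /sys mounted read-only, so kube-proxy cannot raise nf_conntrack_max. The kube-proxy section further down shows the matching "sysfs is not writable" error with mount options [ro nosuid nodev noexec relatime]. A minimal sketch for checking the same condition by hand inside the node container (hypothetical helper, not minikube code):

    // sysfs_check.go: hypothetical, not minikube code. Reproduces the
    // condition behind kube-proxy's readOnlySysFS warning: is /sys read-only?
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    func main() {
        f, err := os.Open("/proc/mounts")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        for sc.Scan() {
            // /proc/mounts fields: device mountpoint fstype options dump pass
            fields := strings.Fields(sc.Text())
            if len(fields) < 4 || fields[1] != "/sys" {
                continue
            }
            for _, opt := range strings.Split(fields[3], ",") {
                if opt == "ro" {
                    fmt.Println("/sys is read-only; conntrack limits cannot be modified")
                    return
                }
            }
            fmt.Println("/sys is writable")
            return
        }
    }

Inside the kic container this prints the read-only message, matching what kube-proxy reports; the warning is noisy but, per the event text itself, only "problems may arise later" rather than an immediate failure.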
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [9c59d53a50d1] <== * 2020-07-24 22:06:28.430376 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (572.721782ms) to execute * 2020-07-24 22:06:28.430409 W | etcdserver: read-only range request "key:\"/registry/persistentvolumes\" range_end:\"/registry/persistentvolumet\" count_only:true " with result "range_response_count:0 size:5" took too long (373.506944ms) to execute * 2020-07-24 22:06:32.187495 W | wal: sync duration of 1.764548171s, expected less than 1s * 2020-07-24 22:06:32.193957 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.328624953s) to execute * 2020-07-24 22:06:32.193987 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/coredns-66bff467f8-jt5gp.1624cf6e1be4a3dc\" " with result "range_response_count:1 size:837" took too long (2.371439628s) to execute * 2020-07-24 22:06:32.194024 W | etcdserver: read-only range request "key:\"/registry/daemonsets\" range_end:\"/registry/daemonsett\" count_only:true " with result "range_response_count:0 size:7" took too long (2.153058958s) to execute * 2020-07-24 22:06:32.194167 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (278.890973ms) to execute * 2020-07-24 22:06:35.444856 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (2.000001226s) to execute * WARNING: 2020/07/24 22:06:35 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:06:36.804714 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (4.358205634s) to execute * 2020-07-24 22:06:36.804986 W | etcdserver: read-only range request "key:\"/registry/persistentvolumeclaims\" range_end:\"/registry/persistentvolumeclaimt\" count_only:true " with result "range_response_count:0 size:5" took too long (3.987866708s) to execute * 2020-07-24 22:06:36.805111 W | etcdserver: read-only range request "key:\"/registry/rolebindings\" range_end:\"/registry/rolebindingt\" count_only:true " with result "range_response_count:0 size:7" took too long (3.852293892s) to execute * 2020-07-24 22:06:36.805189 W | etcdserver: read-only range request "key:\"/registry/poddisruptionbudgets\" range_end:\"/registry/poddisruptionbudgett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.157679242s) to execute * 2020-07-24 22:06:36.805206 W | etcdserver: read-only range request "key:\"/registry/apiregistration.k8s.io/apiservices\" range_end:\"/registry/apiregistration.k8s.io/apiservicet\" count_only:true " with result "range_response_count:0 size:7" took too long (2.74707682s) to execute * 2020-07-24 22:06:36.805256 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-apiserver-auto-20200724220146-14997.1624cf686f557e76\" " with result "range_response_count:1 size:862" took too long (1.358888092s) to execute * 2020-07-24 22:06:36.805299 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result 
"range_response_count:0 size:7" took too long (3.032082918s) to execute * 2020-07-24 22:06:36.805344 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (3.642771237s) to execute * 2020-07-24 22:06:36.805493 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.373280692s) to execute * 2020-07-24 22:08:57.905438 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:286" took too long (112.091692ms) to execute * 2020-07-24 22:09:08.585313 W | wal: sync duration of 1.733971585s, expected less than 1s * 2020-07-24 22:09:08.599077 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (378.640562ms) to execute * 2020-07-24 22:09:08.599099 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (823.551157ms) to execute * 2020-07-24 22:09:08.741589 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:286" took too long (109.576942ms) to execute * 2020-07-24 22:09:26.301928 W | wal: sync duration of 2.319390507s, expected less than 1s * 2020-07-24 22:10:10.708025 W | etcdserver: read-only range request "key:\"/registry/runtimeclasses\" range_end:\"/registry/runtimeclasset\" count_only:true " with result "range_response_count:0 size:5" took too long (236.533039ms) to execute * * ==> kernel <== * 22:10:37 up 37 min, 0 users, load average: 10.56, 11.53, 7.46 * Linux auto-20200724220146-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [578a205f680d] <== * Trace[439064068]: [600.338656ms] [600.338656ms] END * I0724 22:06:19.832501 1 trace.go:116] Trace[1413311385]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.9 (started: 2020-07-24 22:06:19.231991164 +0000 UTC m=+185.581336949) (total time: 600.481866ms): * Trace[1413311385]: [600.424562ms] [600.40476ms] Listing from storage done * I0724 22:06:19.832869 1 trace.go:116] Trace[752204729]: "Get" url:/api/v1/namespaces/default/endpoints/kubernetes,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:06:18.144811928 +0000 UTC m=+184.494157713) (total time: 1.688016826s): * Trace[752204729]: [1.687978124s] [1.687966423s] About to write a response * I0724 22:06:28.430857 1 trace.go:116] Trace[1658343328]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:06:27.775026085 +0000 UTC m=+194.124371970) (total time: 655.793353ms): * Trace[1658343328]: [655.763751ms] [654.578869ms] Transaction committed * I0724 22:06:32.194536 1 trace.go:116] Trace[1305355705]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:06:29.864911854 +0000 UTC m=+196.214257539) (total time: 2.32958382s): * Trace[1305355705]: [2.32958382s] [2.32958382s] END * I0724 22:06:32.194631 1 trace.go:116] Trace[885891870]: "List" 
url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.9 (started: 2020-07-24 22:06:29.864883652 +0000 UTC m=+196.214229337) (total time: 2.32972673s): * Trace[885891870]: [2.329673026s] [2.329651224s] Listing from storage done * I0724 22:06:32.196173 1 trace.go:116] Trace[1075908147]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:06:29.822135083 +0000 UTC m=+196.171480768) (total time: 2.374007505s): * Trace[1075908147]: [2.372419895s] [2.372419895s] initial value restored * I0724 22:06:32.196266 1 trace.go:116] Trace[1677435849]: "Patch" url:/api/v1/namespaces/kube-system/events/coredns-66bff467f8-jt5gp.1624cf6e1be4a3dc,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.9 (started: 2020-07-24 22:06:29.822044076 +0000 UTC m=+196.171389761) (total time: 2.374198719s): * Trace[1677435849]: [2.372513002s] [2.372472399s] About to apply patch * I0724 22:06:36.805280 1 trace.go:116] Trace[1037753692]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:06:32.445473008 +0000 UTC m=+198.794818793) (total time: 4.359771342s): * Trace[1037753692]: [4.359754441s] [4.359311311s] Transaction committed * I0724 22:06:36.805398 1 trace.go:116] Trace[1290834510]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/auto-20200724220146-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.9 (started: 2020-07-24 22:06:32.445302196 +0000 UTC m=+198.794647881) (total time: 4.360069363s): * Trace[1290834510]: [4.36002426s] [4.359909952s] Object stored in database * I0724 22:06:36.808009 1 trace.go:116] Trace[1216026229]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:06:35.445893326 +0000 UTC m=+201.795239111) (total time: 1.362083814s): * Trace[1216026229]: [1.359664346s] [1.359664346s] initial value restored * I0724 22:06:36.808116 1 trace.go:116] Trace[1509977975]: "Patch" url:/api/v1/namespaces/kube-system/events/kube-apiserver-auto-20200724220146-14997.1624cf686f557e76,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.9 (started: 2020-07-24 22:06:35.44580962 +0000 UTC m=+201.795155305) (total time: 1.362279128s): * Trace[1509977975]: [1.359750752s] [1.359716949s] About to apply patch * I0724 22:09:08.599662 1 trace.go:116] Trace[1950261568]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:09:07.775060665 +0000 UTC m=+354.124406650) (total time: 824.568128ms): * Trace[1950261568]: [824.495123ms] [824.485822ms] About to write a response * * ==> kube-controller-manager [621c894d0324] <== * I0724 22:03:44.360256 1 shared_informer.go:230] Caches are synced for endpoint * I0724 22:03:44.360505 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:03:44.360580 1 disruption.go:339] Sending events to api server. 
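[annotation] The etcd and kube-apiserver sections above point at one underlying problem: wal fsync repeatedly exceeding the expected 1s (up to 2.319s) and read-only range requests taking multiple seconds (worst shown: 4.358205634s). That lines up with the "etcdserver: request timed out" errors in the controller-manager and kubelet sections below, and suggests slow disk I/O on the CI host rather than a component bug. When triaging runs like this it helps to quantify the slow requests instead of eyeballing them; a minimal sketch that scans a saved log for etcd's "took too long (...)" warnings (assumed to read the minikube logs output on stdin; not part of the test suite):

    // etcd_slow.go: hypothetical triage helper, not minikube test code.
    // Counts etcd "took too long (<duration>)" warnings and tracks the worst.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "time"
    )

    var tookTooLong = regexp.MustCompile(`took too long \(([0-9.]+[a-zµ]+)\)`)

    func main() {
        sc := bufio.NewScanner(os.Stdin)
        // These log lines can be very long; raise the scanner's limit.
        sc.Buffer(make([]byte, 1024*1024), 1024*1024)
        var worst time.Duration
        count := 0
        for sc.Scan() {
            m := tookTooLong.FindStringSubmatch(sc.Text())
            if m == nil {
                continue
            }
            d, err := time.ParseDuration(m[1])
            if err != nil {
                continue
            }
            count++
            if d > worst {
                worst = d
            }
        }
        fmt.Printf("%d slow etcd requests, worst %v\n", count, worst)
    }

Fed this run's log (go run etcd_slow.go < minikube.log), it would report 4.358205634s as the worst case, well past etcd's stated expectation of under 1s.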
* I0724 22:03:44.391112 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"12599ad7-3a2f-4a1c-90aa-d59363534fbc", APIVersion:"apps/v1", ResourceVersion:"313", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-jt5gp * I0724 22:03:44.456170 1 shared_informer.go:230] Caches are synced for job * I0724 22:03:44.635560 1 shared_informer.go:230] Caches are synced for expand * I0724 22:03:44.635579 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:03:44.736406 1 shared_informer.go:230] Caches are synced for ClusterRoleAggregator * I0724 22:03:44.738053 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"bdec6daf-a3a3-43a3-a605-2590d4dfd5b2", APIVersion:"apps/v1", ResourceVersion:"349", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1 * I0724 22:03:44.757159 1 shared_informer.go:230] Caches are synced for certificate-csrsigning * I0724 22:03:44.795807 1 shared_informer.go:230] Caches are synced for certificate-csrapproving * I0724 22:03:44.836669 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"12599ad7-3a2f-4a1c-90aa-d59363534fbc", APIVersion:"apps/v1", ResourceVersion:"350", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-gjj22 * I0724 22:03:44.872401 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:03:44.947336 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:03:44.947363 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:03:44.959393 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:03:45.235717 1 request.go:621] Throttling request took 1.086889062s, request: GET:https://control-plane.minikube.internal:8443/apis/batch/v1?timeout=32s * I0724 22:03:45.644406 1 shared_informer.go:223] Waiting for caches to sync for garbage collector * I0724 22:03:45.644456 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:03:52.456075 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"netcat", UID:"53987571-c8ad-4951-90ef-a7709310148a", APIVersion:"apps/v1", ResourceVersion:"404", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set netcat-7987c4c66b to 1 * I0724 22:03:52.471765 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"netcat-7987c4c66b", UID:"c2fb669f-2d73-438b-8ea2-e9175e087267", APIVersion:"apps/v1", ResourceVersion:"405", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: netcat-7987c4c66b-7p7gg * I0724 22:04:18.543333 1 event.go:278] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"default", Name:"netcat", UID:"d24c7f33-9d41-4e2d-9b44-a6e496ccc108", APIVersion:"v1", ResourceVersion:"413", FieldPath:""}): type: 'Warning' reason: 'FailedToUpdateEndpoint' Failed to update endpoint default/netcat: etcdserver: request timed out * W0724 22:04:18.544145 1 endpointslice_controller.go:260] Error syncing endpoint slices for service "default/netcat", retrying. 
Error: Error updating netcat-xh5n4 EndpointSlice for Service default/netcat: etcdserver: request timed out * I0724 22:04:18.544264 1 event.go:278] Event(v1.ObjectReference{Kind:"Service", Namespace:"default", Name:"netcat", UID:"c2e099e9-20a1-429f-b573-7ef49ee5ac81", APIVersion:"v1", ResourceVersion:"409", FieldPath:""}): type: 'Warning' reason: 'FailedToUpdateEndpointSlices' Error updating Endpoint Slices for Service default/netcat: Error updating netcat-xh5n4 EndpointSlice for Service default/netcat: etcdserver: request timed out * I0724 22:04:19.640603 1 event.go:278] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"default", Name:"netcat", UID:"d24c7f33-9d41-4e2d-9b44-a6e496ccc108", APIVersion:"v1", ResourceVersion:"413", FieldPath:""}): type: 'Warning' reason: 'FailedToUpdateEndpoint' Failed to update endpoint default/netcat: Operation cannot be fulfilled on endpoints "netcat": the object has been modified; please apply your changes to the latest version and try again * * ==> kube-proxy [872a4dcaf60b] <== * W0724 22:03:48.881821 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:03:48.889081 1 node.go:136] Successfully retrieved node IP: 172.17.0.9 * I0724 22:03:48.889113 1 server_others.go:186] Using iptables Proxier. * W0724 22:03:48.889121 1 server_others.go:436] detect-local-mode set to ClusterCIDR, but no cluster CIDR defined * I0724 22:03:48.889125 1 server_others.go:447] detect-local-mode: ClusterCIDR , defaulting to no-op detect-local * I0724 22:03:48.889408 1 server.go:583] Version: v1.18.3 * I0724 22:03:48.890049 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:03:48.890423 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:03:48.890647 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:03:48.890720 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:03:48.890864 1 config.go:315] Starting service config controller * I0724 22:03:48.890870 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:03:48.890890 1 config.go:133] Starting endpoints config controller * I0724 22:03:48.890899 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:03:49.035799 1 shared_informer.go:230] Caches are synced for service config * I0724 22:03:49.035891 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [21901f6ecc57] <== * E0724 22:03:19.068143 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:03:20.102094 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:03:20.172945 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:03:20.276472 1 reflector.go:178] 
k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:03:20.279592 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:03:20.304497 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:03:20.312461 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:03:20.321131 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:03:20.447001 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:03:20.566514 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:03:21.833615 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:03:21.868541 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:03:22.028882 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:03:22.140758 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:03:22.231796 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:03:22.512607 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:03:22.824689 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list 
*v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:03:23.335931 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:03:23.683776 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:03:25.992887 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:03:26.005929 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:03:26.464614 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:03:26.618552 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:03:27.399959 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * I0724 22:03:36.365301 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:02:34 UTC, end at Fri 2020-07-24 22:10:37 UTC. 
-- * Jul 24 22:03:45 auto-20200724220146-14997 kubelet[2398]: I0724 22:03:45.135762 2398 reconciler.go:319] Volume detached for volume "config-volume" (UniqueName: "kubernetes.io/configmap/ee1918c3-564d-460f-8266-bea7dc21ff55-config-volume") on node "auto-20200724220146-14997" DevicePath "" * Jul 24 22:03:45 auto-20200724220146-14997 kubelet[2398]: I0724 22:03:45.593323 2398 topology_manager.go:233] [topologymanager] Topology Admit Handler * Jul 24 22:03:45 auto-20200724220146-14997 kubelet[2398]: I0724 22:03:45.741739 2398 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "tmp" (UniqueName: "kubernetes.io/host-path/5d59faf0-c9db-4be7-a37c-84a1fa2844c5-tmp") pod "storage-provisioner" (UID: "5d59faf0-c9db-4be7-a37c-84a1fa2844c5") * Jul 24 22:03:45 auto-20200724220146-14997 kubelet[2398]: I0724 22:03:45.741862 2398 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "storage-provisioner-token-g5bx5" (UniqueName: "kubernetes.io/secret/5d59faf0-c9db-4be7-a37c-84a1fa2844c5-storage-provisioner-token-g5bx5") pod "storage-provisioner" (UID: "5d59faf0-c9db-4be7-a37c-84a1fa2844c5") * Jul 24 22:03:46 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:46.849261 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for kube-system/coredns-66bff467f8-jt5gp through plugin: invalid network status for * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: E0724 22:03:48.915849 2398 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "coredns-66bff467f8-gjj22": Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused "process_linux.go:449: container init caused \"process_linux.go:438: writing syncT 'resume' caused \\\"write init-p: broken pipe\\\"\"": unknown * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: E0724 22:03:48.915905 2398 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-gjj22_kube-system(ee1918c3-564d-460f-8266-bea7dc21ff55)" failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "coredns-66bff467f8-gjj22": Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused "process_linux.go:449: container init caused \"process_linux.go:438: writing syncT 'resume' caused \\\"write init-p: broken pipe\\\"\"": unknown * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: E0724 22:03:48.915921 2398 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-gjj22_kube-system(ee1918c3-564d-460f-8266-bea7dc21ff55)" failed: rpc error: code = Unknown desc = failed to start sandbox container for pod "coredns-66bff467f8-gjj22": Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused "process_linux.go:449: container init caused \"process_linux.go:438: writing syncT 'resume' caused \\\"write init-p: broken pipe\\\"\"": unknown * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: E0724 22:03:48.915970 2398 pod_workers.go:191] Error syncing pod ee1918c3-564d-460f-8266-bea7dc21ff55 ("coredns-66bff467f8-gjj22_kube-system(ee1918c3-564d-460f-8266-bea7dc21ff55)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-gjj22_kube-system(ee1918c3-564d-460f-8266-bea7dc21ff55)" with CreatePodSandboxError: "CreatePodSandbox for pod 
\"coredns-66bff467f8-gjj22_kube-system(ee1918c3-564d-460f-8266-bea7dc21ff55)\" failed: rpc error: code = Unknown desc = failed to start sandbox container for pod \"coredns-66bff467f8-gjj22\": Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused \"process_linux.go:449: container init caused \\\"process_linux.go:438: writing syncT 'resume' caused \\\\\\\"write init-p: broken pipe\\\\\\\"\\\"\": unknown" * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:48.916108 2398 container.go:412] Failed to create summary reader for "/kubepods/burstable/podee1918c3-564d-460f-8266-bea7dc21ff55/e1dac89659df2a59f14ccad9931dbedb4c7d33cf61402e58d1b0b5077365f42e": none of the resources are being tracked. * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:48.944149 2398 pod_container_deletor.go:77] Container "29294a210e2a36ab08a63992b872809977bf1b968cc9248c829ad67c1de53fbb" not found in pod's containers * Jul 24 22:03:48 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:48.946244 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for kube-system/coredns-66bff467f8-jt5gp through plugin: invalid network status for * Jul 24 22:03:49 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:48.984168 2398 pod_container_deletor.go:77] Container "f43e4593c7cd9ddd3991c7143ade08b6fb8a0348a88939ab25522259030313d5" not found in pod's containers * Jul 24 22:03:50 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:50.003965 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for kube-system/coredns-66bff467f8-jt5gp through plugin: invalid network status for * Jul 24 22:03:52 auto-20200724220146-14997 kubelet[2398]: I0724 22:03:52.477092 2398 topology_manager.go:233] [topologymanager] Topology Admit Handler * Jul 24 22:03:52 auto-20200724220146-14997 kubelet[2398]: I0724 22:03:52.657801 2398 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "default-token-59wsb" (UniqueName: "kubernetes.io/secret/4e0fd294-9f63-4ee5-ba5a-c966303840a3-default-token-59wsb") pod "netcat-7987c4c66b-7p7gg" (UID: "4e0fd294-9f63-4ee5-ba5a-c966303840a3") * Jul 24 22:03:53 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:53.392609 2398 pod_container_deletor.go:77] Container "3a8bb138ce50d721489ad0b292ef9ce1ba41d2295363afc4dfcce2a2d027a803" not found in pod's containers * Jul 24 22:03:53 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:53.392858 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for default/netcat-7987c4c66b-7p7gg through plugin: invalid network status for * Jul 24 22:03:54 auto-20200724220146-14997 kubelet[2398]: W0724 22:03:54.399415 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for default/netcat-7987c4c66b-7p7gg through plugin: invalid network status for * Jul 24 22:04:06 auto-20200724220146-14997 kubelet[2398]: E0724 22:04:06.820623 2398 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"coredns-66bff467f8-jt5gp.1624cf6e1be4a3dc", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), 
Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"coredns-66bff467f8-jt5gp", UID:"1103547f-4b8f-4675-bd1c-1c1460791836", APIVersion:"v1", ResourceVersion:"339", FieldPath:"spec.containers{coredns}"}, Reason:"Unhealthy", Message:"Readiness probe failed: HTTP probe failed with statuscode: 503", Source:v1.EventSource{Component:"kubelet", Host:"auto-20200724220146-14997"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xbfbef493f0b74ddc, ext:30253165398, loc:(*time.Location)(0x701d4a0)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xbfbef493f0b74ddc, ext:30253165398, loc:(*time.Location)(0x701d4a0)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'etcdserver: request timed out' (will not retry!) * Jul 24 22:04:07 auto-20200724220146-14997 kubelet[2398]: E0724 22:04:07.680203 2398 controller.go:178] failed to update node lease, error: etcdserver: request timed out * Jul 24 22:04:09 auto-20200724220146-14997 kubelet[2398]: E0724 22:04:09.572299 2398 controller.go:178] failed to update node lease, error: Operation cannot be fulfilled on leases.coordination.k8s.io "auto-20200724220146-14997": the object has been modified; please apply your changes to the latest version and try again * Jul 24 22:04:10 auto-20200724220146-14997 kubelet[2398]: W0724 22:04:10.483237 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for default/netcat-7987c4c66b-7p7gg through plugin: invalid network status for * Jul 24 22:04:11 auto-20200724220146-14997 kubelet[2398]: W0724 22:04:11.529564 2398 docker_sandbox.go:400] failed to read pod IP from plugin/docker: Couldn't find network status for default/netcat-7987c4c66b-7p7gg through plugin: invalid network status for * Jul 24 22:04:56 auto-20200724220146-14997 kubelet[2398]: E0724 22:04:56.820228 2398 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"coredns-66bff467f8-jt5gp.1624cf6e1be4a3dc", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"453", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"coredns-66bff467f8-jt5gp", UID:"1103547f-4b8f-4675-bd1c-1c1460791836", APIVersion:"v1", ResourceVersion:"339", FieldPath:"spec.containers{coredns}"}, Reason:"Unhealthy", Message:"Readiness probe failed: HTTP probe failed with statuscode: 503", Source:v1.EventSource{Component:"kubelet", Host:"auto-20200724220146-14997"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63731225039, loc:(*time.Location)(0x701d4a0)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xbfbef4a070b7b400, ext:80253191646, loc:(*time.Location)(0x701d4a0)}}, Count:6, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, 
Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'etcdserver: request timed out' (will not retry!) * * ==> storage-provisioner [a234536c1337] <== -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p auto-20200724220146-14997 -n auto-20200724220146-14997 helpers_test.go:254: (dbg) Run: kubectl --context auto-20200724220146-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/auto]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context auto-20200724220146-14997 describe pod helpers_test.go:265: (dbg) Non-zero exit: kubectl --context auto-20200724220146-14997 describe pod : exit status 1 (68.155137ms) ** stderr ** error: resource name may not be empty ** /stderr ** helpers_test.go:267: kubectl --context auto-20200724220146-14997 describe pod : exit status 1 helpers_test.go:170: Cleaning up "auto-20200724220146-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p auto-20200724220146-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p auto-20200724220146-14997: (4.702709926s) === CONT TestStartStop/group/embed-certs === RUN TestStartStop/group/embed-certs/serial === RUN TestStartStop/group/embed-certs/serial/FirstStart start_stop_delete_test.go:149: (dbg) Run: ./minikube-linux-amd64 start -p embed-certs-20200724221043-14997 --memory=2200 --alsologtostderr --wait=true --embed-certs --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3 === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.306757464s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.222577934s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.218291339s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestStartStop/group/old-k8s-version/serial/SecondStart start_stop_delete_test.go:190: (dbg) Done: ./minikube-linux-amd64 start -p old-k8s-version-20200724220619-14997 
--memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --container-runtime=docker --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.13.0: (2m31.191198396s) start_stop_delete_test.go:196: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997 === RUN TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... helpers_test.go:332: "kubernetes-dashboard-76d8f8d5bc-hj89z" [810ea05a-cdfa-11ea-a1bc-0242077342c0] Running === CONT TestStartStop/group/embed-certs/serial/FirstStart start_stop_delete_test.go:149: (dbg) Done: ./minikube-linux-amd64 start -p embed-certs-20200724221043-14997 --memory=2200 --alsologtostderr --wait=true --embed-certs --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3: (1m2.572009462s) === RUN TestStartStop/group/embed-certs/serial/DeployApp start_stop_delete_test.go:158: (dbg) Run: kubectl --context embed-certs-20200724221043-14997 create -f testdata/busybox.yaml start_stop_delete_test.go:158: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ... helpers_test.go:332: "busybox" [f71e0d7c-1f59-40c0-be29-98b1072b14d6] Pending === CONT TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.012643636s === RUN TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop start_stop_delete_test.go:219: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... 
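[annotation] The helpers_test.go:332 lines around this point are the test's pod-wait loop reporting phase transitions (Pending, then Running) for pods matching a label selector, with a deadline such as the 9m0s above. A minimal sketch of that pattern using plain kubectl polling (hypothetical code, just the shape of the check; the context, namespace, and selector are taken from the log):

    // pod_wait.go: hypothetical sketch of the "waiting for pods matching ..."
    // pattern in the log; not the actual helpers_test.go implementation.
    package main

    import (
        "fmt"
        "os/exec"
        "strings"
        "time"
    )

    func waitForRunning(kubectlContext, namespace, selector string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            // Ask kubectl for just the phases of the matching pods.
            out, err := exec.Command("kubectl", "--context", kubectlContext,
                "-n", namespace, "get", "pods", "-l", selector,
                "-o", "jsonpath={.items[*].status.phase}").Output()
            if err == nil && strings.Contains(string(out), "Running") {
                return nil
            }
            time.Sleep(2 * time.Second)
        }
        return fmt.Errorf("pods matching %q not Running within %v", selector, timeout)
    }

    func main() {
        // Names copied from the UserAppExistsAfterStop step above.
        err := waitForRunning("old-k8s-version-20200724220619-14997",
            "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", 9*time.Minute)
        if err != nil {
            fmt.Println(err)
        }
    }

In this run the real helper reported the dashboard pods healthy within about 5s of the 9m0s budget, so the StartStop flow itself is fine; only the network-plugin DNS subtests are failing.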
helpers_test.go:332: "kubernetes-dashboard-76d8f8d5bc-hj89z" [810ea05a-cdfa-11ea-a1bc-0242077342c0] Running === CONT TestStartStop/group/embed-certs/serial/DeployApp helpers_test.go:332: "busybox" [f71e0d7c-1f59-40c0-be29-98b1072b14d6] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) helpers_test.go:332: "busybox" [f71e0d7c-1f59-40c0-be29-98b1072b14d6] Running === CONT TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop start_stop_delete_test.go:219: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.005327423s === RUN TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages start_stop_delete_test.go:227: (dbg) Run: ./minikube-linux-amd64 ssh -p old-k8s-version-20200724220619-14997 "sudo crictl images -o json" start_stop_delete_test.go:227: Found non-minikube image: busybox:1.28.4-glibc === RUN TestStartStop/group/old-k8s-version/serial/Pause start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 pause -p old-k8s-version-20200724220619-14997 --alsologtostderr -v=1 start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997 start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997: exit status 2 (375.592098ms) -- stdout -- Paused -- /stdout -- start_stop_delete_test.go:233: status error: exit status 2 (may be ok) start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997 start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997: exit status 2 (382.692888ms) -- stdout -- Stopped -- /stdout -- start_stop_delete_test.go:233: status error: exit status 2 (may be ok) start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 unpause -p old-k8s-version-20200724220619-14997 --alsologtostderr -v=1 start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997 start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-20200724220619-14997 -n old-k8s-version-20200724220619-14997 === CONT TestStartStop/group/embed-certs/serial/DeployApp start_stop_delete_test.go:158: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 9.014263909s start_stop_delete_test.go:158: (dbg) Run: kubectl --context embed-certs-20200724221043-14997 exec busybox -- /bin/sh -c "ulimit -n" === RUN TestStartStop/group/embed-certs/serial/Stop start_stop_delete_test.go:164: (dbg) Run: ./minikube-linux-amd64 stop -p embed-certs-20200724221043-14997 --alsologtostderr -v=3 === CONT TestStartStop/group/old-k8s-version/serial start_stop_delete_test.go:126: (dbg) Run: ./minikube-linux-amd64 delete -p old-k8s-version-20200724220619-14997 === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT 
TestStartStop/group/old-k8s-version/serial start_stop_delete_test.go:126: (dbg) Done: ./minikube-linux-amd64 delete -p old-k8s-version-20200724220619-14997: (5.057926207s) start_stop_delete_test.go:131: (dbg) Run: kubectl config get-contexts old-k8s-version-20200724220619-14997 start_stop_delete_test.go:131: (dbg) Non-zero exit: kubectl config get-contexts old-k8s-version-20200724220619-14997: exit status 1 (63.287425ms) -- stdout -- CURRENT NAME CLUSTER AUTHINFO NAMESPACE -- /stdout -- ** stderr ** error: context old-k8s-version-20200724220619-14997 not found ** /stderr ** start_stop_delete_test.go:133: config context error: exit status 1 (may be ok) === CONT TestStartStop/group/old-k8s-version helpers_test.go:170: Cleaning up "old-k8s-version-20200724220619-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p old-k8s-version-20200724220619-14997 === CONT TestStartStop/group/containerd === RUN TestStartStop/group/containerd/serial === RUN TestStartStop/group/containerd/serial/FirstStart start_stop_delete_test.go:149: (dbg) Run: ./minikube-linux-amd64 start -p containerd-20200724221200-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --apiserver-port=8444 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3 === CONT TestStartStop/group/embed-certs/serial/Stop start_stop_delete_test.go:164: (dbg) Done: ./minikube-linux-amd64 stop -p embed-certs-20200724221043-14997 --alsologtostderr -v=3: (12.195110954s) === RUN TestStartStop/group/embed-certs/serial/EnableAddonAfterStop start_stop_delete_test.go:174: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997 start_stop_delete_test.go:174: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997: exit status 7 (129.284743ms) -- stdout -- Stopped -- /stdout -- start_stop_delete_test.go:174: status error: exit status 7 (may be ok) start_stop_delete_test.go:181: (dbg) Run: ./minikube-linux-amd64 addons enable dashboard -p embed-certs-20200724221043-14997 === RUN TestStartStop/group/embed-certs/serial/SecondStart start_stop_delete_test.go:190: (dbg) Run: ./minikube-linux-amd64 start -p embed-certs-20200724221043-14997 --memory=2200 --alsologtostderr --wait=true --embed-certs --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3 === CONT TestNetworkPlugins/group/enable-default-cni/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.306198064s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:162: failed to do nslookup on kubernetes.default: exit status 1 net_test.go:167: failed nslookup: got=";; connection timed out; no servers could be reached\n\n", want=*"10.96.0.1"* === CONT TestNetworkPlugins/group/enable-default-cni net_test.go:204: "enable-default-cni" test finished in 12m3.465617994s, failed=true net_test.go:205: *** TestNetworkPlugins/group/enable-default-cni FAILED at 2020-07-24 22:12:11.642332385 +0000 UTC m=+2161.766696770 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> 
post-mortem[TestNetworkPlugins/group/enable-default-cni]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect enable-default-cni-20200724220253-14997 helpers_test.go:228: (dbg) docker inspect enable-default-cni-20200724220253-14997: -- stdout -- [ { "Id": "3bc41b437f9e8d708694bdb8bbc004b7378bbad00be719a2417bae2ffd516c03", "Created": "2020-07-24T22:04:28.116965389Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 150397, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:04:28.944643108Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/3bc41b437f9e8d708694bdb8bbc004b7378bbad00be719a2417bae2ffd516c03/resolv.conf", "HostnamePath": "/var/lib/docker/containers/3bc41b437f9e8d708694bdb8bbc004b7378bbad00be719a2417bae2ffd516c03/hostname", "HostsPath": "/var/lib/docker/containers/3bc41b437f9e8d708694bdb8bbc004b7378bbad00be719a2417bae2ffd516c03/hosts", "LogPath": "/var/lib/docker/containers/3bc41b437f9e8d708694bdb8bbc004b7378bbad00be719a2417bae2ffd516c03/3bc41b437f9e8d708694bdb8bbc004b7378bbad00be719a2417bae2ffd516c03-json.log", "Name": "/enable-default-cni-20200724220253-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "enable-default-cni-20200724220253-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/559282caa3611af4196c6bc1fd85e0c297eda7ecc5ae19fe5fc8c7145bba402c-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/559282caa3611af4196c6bc1fd85e0c297eda7ecc5ae19fe5fc8c7145bba402c/merged", "UpperDir": "/var/lib/docker/overlay2/559282caa3611af4196c6bc1fd85e0c297eda7ecc5ae19fe5fc8c7145bba402c/diff", "WorkDir": "/var/lib/docker/overlay2/559282caa3611af4196c6bc1fd85e0c297eda7ecc5ae19fe5fc8c7145bba402c/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "enable-default-cni-20200724220253-14997", "Source": "/var/lib/docker/volumes/enable-default-cni-20200724220253-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "enable-default-cni-20200724220253-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" 
], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "enable-default-cni-20200724220253-14997", "name.minikube.sigs.k8s.io": "enable-default-cni-20200724220253-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "185ce3d85264cc89d8ce349a6ad3e2a4c3c595abf06aac3e5ed73407561f2fe0", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32868" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32867" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32866" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32865" } ] }, "SandboxKey": "/var/run/docker/netns/185ce3d85264", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "947554a87450133dc4c940d3e54804bc10d05107f1e587eb45fd7c23e0e3cd86", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.10", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:0a", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "947554a87450133dc4c940d3e54804bc10d05107f1e587eb45fd7c23e0e3cd86", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.10", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:0a", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p enable-default-cni-20200724220253-14997 -n enable-default-cni-20200724220253-14997 helpers_test.go:232: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p enable-default-cni-20200724220253-14997 -n enable-default-cni-20200724220253-14997: exit status 2 (7.084997714s) -- stdout -- Running -- /stdout -- ** stderr ** E0724 22:12:18.790617 338910 status.go:256] Error apiserver status: https://172.17.0.10:8443/healthz returned error 500: [+]ping ok [+]log ok [-]etcd failed: reason withheld [+]poststarthook/start-kube-apiserver-admission-initializer ok [+]poststarthook/generic-apiserver-start-informers ok [+]poststarthook/start-apiextensions-informers ok [+]poststarthook/start-apiextensions-controllers ok [+]poststarthook/crd-informer-synced ok [+]poststarthook/bootstrap-controller ok [+]poststarthook/rbac/bootstrap-roles ok [+]poststarthook/scheduling/bootstrap-system-priority-classes ok [+]poststarthook/start-cluster-authentication-info-controller ok [+]poststarthook/start-kube-aggregator-informers ok [+]poststarthook/apiservice-registration-controller ok [+]poststarthook/apiservice-status-available-controller ok [+]poststarthook/kube-apiserver-autoregistration ok [+]autoregister-completion ok [+]poststarthook/apiservice-openapi-controller ok healthz check failed ** /stderr ** helpers_test.go:232: status error: exit status 2 (may be ok) helpers_test.go:237: <<< TestNetworkPlugins/group/enable-default-cni FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/enable-default-cni]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p enable-default-cni-20200724220253-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p enable-default-cni-20200724220253-14997 logs -n 25: (9.115591448s) helpers_test.go:245: 
TestNetworkPlugins/group/enable-default-cni logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:04:31 UTC, end at Fri 2020-07-24 22:12:24 UTC. -- * Jul 24 22:04:43 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:04:43.816560606Z" level=info msg="Daemon has completed initialization" * Jul 24 22:04:43 enable-default-cni-20200724220253-14997 systemd[1]: Started Docker Application Container Engine. * Jul 24 22:04:43 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:04:43.847166532Z" level=info msg="API listen on /var/run/docker.sock" * Jul 24 22:04:43 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:04:43.847535158Z" level=info msg="API listen on [::]:2376" * Jul 24 22:05:19 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:19.432069896Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:05:19 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:19.432204305Z" level=warning msg="521ff621df18bde3a8608f6be02334aa45161c3d1b7772116b1f782b3ab64483 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/521ff621df18bde3a8608f6be02334aa45161c3d1b7772116b1f782b3ab64483/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:05:44 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:44.252476898Z" level=error msg="stream copy error: reading from a closed fifo" * Jul 24 22:05:44 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:44.264457529Z" level=error msg="stream copy error: reading from a closed fifo" * Jul 24 22:05:44 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:44.567266019Z" level=error msg="ec766e1b5f0f0be197da2d4ae691814ff335db84dfd0d1aac35e6f14ffcf7d9a cleanup: failed to delete container from containerd: no such container" * Jul 24 22:05:44 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:44.567329223Z" level=error msg="Handler for POST /containers/ec766e1b5f0f0be197da2d4ae691814ff335db84dfd0d1aac35e6f14ffcf7d9a/start returned error: OCI runtime create failed: container_linux.go:349: starting container process caused \"process_linux.go:449: container init caused \\\"process_linux.go:438: writing syncT 'resume' caused \\\\\\\"write init-p: broken pipe\\\\\\\"\\\"\": unknown" * Jul 24 22:05:44 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:44.648732866Z" level=warning msg="Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap." 
* Jul 24 22:05:46 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:46.410303974Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:05:46 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:46.410485287Z" level=warning msg="38e638bd145f5ae747eced62961a2cc19d4852d1d44f8fc9427897db81bf0403 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/38e638bd145f5ae747eced62961a2cc19d4852d1d44f8fc9427897db81bf0403/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:05:47 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:47.532888590Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:05:47 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:05:47.533010598Z" level=warning msg="94a42943f14f3e98a0b46ce38a21048469517ca992700eda208d2dc4888292e1 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/94a42943f14f3e98a0b46ce38a21048469517ca992700eda208d2dc4888292e1/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:06:02 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:06:02.780499921Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:06:02 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:06:02.780642531Z" level=warning msg="3667f70047a6e64b32504984ad6195e7dff0d245bf122b90a1890a840665ae20 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/3667f70047a6e64b32504984ad6195e7dff0d245bf122b90a1890a840665ae20/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:06:27 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:06:27.890384198Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:06:27 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:06:27.890400699Z" level=warning msg="e28cb05f0b2b27634d4d0c941fcbb5c7e41b9d6cbfa5faed97ebf2c6068f9f3b cleanup: failed to unmount IPC: umount /var/lib/docker/containers/e28cb05f0b2b27634d4d0c941fcbb5c7e41b9d6cbfa5faed97ebf2c6068f9f3b/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:07:19 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:07:19.728260217Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:07:19 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:07:19.728489333Z" level=warning msg="4e62a4385a900d43be7beba939326530d268d84fb1cd20fef164ae1b46f1aeaa cleanup: failed to unmount IPC: umount /var/lib/docker/containers/4e62a4385a900d43be7beba939326530d268d84fb1cd20fef164ae1b46f1aeaa/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:08:48 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:08:48.812649477Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:08:48 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:08:48.812870292Z" level=warning msg="4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d cleanup: failed to unmount IPC: umount /var/lib/docker/containers/4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:11:36 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:11:36.661813717Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:11:36 enable-default-cni-20200724220253-14997 dockerd[359]: time="2020-07-24T22:11:36.661929125Z" level=warning msg="38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396/mounts/shm, flags: 0x2: no such file or directory"
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* 38176139cff66 4689081edb103 49 seconds ago Exited storage-provisioner 6 936d4a3d12a44
* e22397f3f363d gcr.io/kubernetes-e2e-test-images/dnsutils@sha256:b31bcf7ef4420ce7108e7fc10b6c00343b21257c945eec94c21598e72a8f2de0 6 minutes ago Running dnsutils 0 70e290327904f
* f7200b92c78eb 67da37a9a360e 6 minutes ago Running coredns 0 adff0b98016a8
* 2aa5f49cb2b78 3439b7546f29b 6 minutes ago Running kube-proxy 0 523a14493a9be
* 89fbef5a19ca0 da26705ccb4b5 7 minutes ago Running kube-controller-manager 1 d9c753e9cdefe
* 1600a8443efb7 303ce5db0e90d 7 minutes ago Running etcd 0 e69eff1cda33e
* 521ff621df18b da26705ccb4b5 7 minutes ago Exited kube-controller-manager 0 d9c753e9cdefe
* d377059ae6fd3 76216c34ed0c7 7 minutes ago Running kube-scheduler 0 29561fc8197fd
* c79c87144073e 7e28efa976bd1 7 minutes ago Running kube-apiserver 0 6d05708cb3540
*
* ==> coredns [f7200b92c78e] <==
* [INFO] plugin/ready: Still waiting on: "kubernetes"
* E0724 22:12:17.423376 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:17.424440 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:17.425502 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:18.424121 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:18.424903 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:18.425968 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:19.424650 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:19.425642 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:19.426697 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:20.425190 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:20.426074 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:20.427131 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:21.425703 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:21.427108 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:21.428265 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:22.426300 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:22.427509 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:22.428651 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:23.426791 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:23.427910 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:23.429023 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:24.427375 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:24.428352 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
* E0724 22:12:24.429449 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: connection refused
*
* ==> describe nodes <==
* Name: enable-default-cni-20200724220253-14997
* Roles: master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=enable-default-cni-20200724220253-14997
* kubernetes.io/os=linux
* minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
* minikube.k8s.io/name=enable-default-cni-20200724220253-14997
* minikube.k8s.io/updated_at=2020_07_24T22_05_23_0700
* minikube.k8s.io/version=v1.12.1
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
* node.alpha.kubernetes.io/ttl: 0
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 24 Jul 2020 22:05:09 +0000
* Taints:
* Unschedulable: false
* Lease:
* HolderIdentity: enable-default-cni-20200724220253-14997
* AcquireTime:
* RenewTime: Fri, 24 Jul 2020 22:12:19 +0000
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 24 Jul 2020 22:10:56 +0000 Fri, 24 Jul 2020 22:05:04 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 24 Jul 2020 22:10:56 +0000 Fri, 24 Jul 2020 22:05:04 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 24 Jul 2020 22:10:56 +0000 Fri, 24 Jul 2020 22:05:04 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 24 Jul 2020 22:10:56 +0000 Fri, 24 Jul 2020 22:05:20 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 172.17.0.10
* Hostname: enable-default-cni-20200724220253-14997
* Capacity:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* Allocatable:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* System Info:
* Machine ID: 64d55b0573b843caad38c2ef814d9d28
* System UUID: 1351d024-4839-4727-94f4-24d665dccb07
* Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529
* Kernel Version: 5.4.0-1022-azure
* OS Image: Ubuntu 19.10
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: docker://19.3.2
* Kubelet Version: v1.18.3
* Kube-Proxy Version: v1.18.3
* PodCIDR: 10.244.0.0/24
* PodCIDRs: 10.244.0.0/24
* Non-terminated Pods: (8 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* default netcat-7987c4c66b-wk7g2 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m38s
* kube-system coredns-66bff467f8-4j5vc 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 6m44s
* kube-system etcd-enable-default-cni-20200724220253-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 7m2s
* kube-system kube-apiserver-enable-default-cni-20200724220253-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 7m1s
* kube-system kube-controller-manager-enable-default-cni-20200724220253-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 7m4s
* kube-system kube-proxy-kjj4g 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m44s
* kube-system kube-scheduler-enable-default-cni-20200724220253-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 7m1s
* kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m42s
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 650m (4%) 0 (0%)
* memory 70Mi (0%) 170Mi (0%)
* ephemeral-storage 0 (0%) 0 (0%)
* hugepages-1Gi 0 (0%) 0 (0%)
* hugepages-2Mi 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasSufficientMemory 7m29s (x7 over 7m35s) kubelet, enable-default-cni-20200724220253-14997 Node enable-default-cni-20200724220253-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 7m29s (x7 over 7m35s) kubelet, enable-default-cni-20200724220253-14997 Node enable-default-cni-20200724220253-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 7m29s (x7 over 7m35s) kubelet, enable-default-cni-20200724220253-14997 Node enable-default-cni-20200724220253-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 7m29s kubelet, enable-default-cni-20200724220253-14997 Updated Node Allocatable limit across pods
* Warning SystemOOM 7m2s kubelet, enable-default-cni-20200724220253-14997 System OOM encountered, victim process: registry-server, pid: 28567
* Warning SystemOOM 7m2s kubelet, enable-default-cni-20200724220253-14997 System OOM encountered, victim process: registry-server, pid: 26041
* Normal Starting 7m2s kubelet, enable-default-cni-20200724220253-14997 Starting kubelet.
* Normal NodeHasSufficientMemory 7m2s kubelet, enable-default-cni-20200724220253-14997 Node enable-default-cni-20200724220253-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 7m2s kubelet, enable-default-cni-20200724220253-14997 Node enable-default-cni-20200724220253-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 7m2s kubelet, enable-default-cni-20200724220253-14997 Node enable-default-cni-20200724220253-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 7m2s kubelet, enable-default-cni-20200724220253-14997 Updated Node Allocatable limit across pods
* Warning readOnlySysFS 6m42s kube-proxy, enable-default-cni-20200724220253-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 6m42s kube-proxy, enable-default-cni-20200724220253-14997 Starting kube-proxy.
*
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [1600a8443efb] <==
* 2020-07-24 22:12:00.496754 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.193401356s) to execute
* 2020-07-24 22:12:14.135296 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (1.999563978s) to execute
* WARNING: 2020/07/24 22:12:14 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
* 2020-07-24 22:12:16.404793 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (1.999973189s) to execute
* WARNING: 2020/07/24 22:12:16 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
* 2020-07-24 22:12:18.414309 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (1.999883183s) to execute
* WARNING: 2020/07/24 22:12:18 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
* 2020-07-24 22:12:18.790622 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (2.000047094s) to execute
* WARNING: 2020/07/24 22:12:18 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
* 2020-07-24 22:12:19.135870 W | etcdserver: timed out waiting for read index response (local node might have slow network)
* 2020-07-24 22:12:24.201115 W | etcdserver: failed to revoke 15ed7382dbb68bea ("etcdserver: request timed out")
* 2020-07-24 22:12:24.607610 W | wal: sync duration of 12.663077291s, expected less than 1s
* 2020-07-24 22:12:24.689548 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/etcd-enable-default-cni-20200724220253-14997.1624cf8463855eee\" " with result "range_response_count:1 size:877" took too long (9.17958838s) to execute
* 2020-07-24 22:12:24.690884 W | etcdserver: failed to revoke 15ed7382dbb68bea ("lease not found")
* 2020-07-24 22:12:24.691047 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts\" range_end:\"/registry/serviceaccountt\" count_only:true " with result "range_response_count:0 size:7" took too long (12.186757517s) to execute
* 2020-07-24 22:12:24.691305 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (9.504849152s) to execute
* 2020-07-24 22:12:24.691317 W | etcdserver: read-only range request "key:\"/registry/poddisruptionbudgets\" range_end:\"/registry/poddisruptionbudgett\" count_only:true " with result "range_response_count:0 size:5" took too long (6.311890075s) to execute
* 2020-07-24 22:12:24.691551 W | etcdserver: read-only range request "key:\"/registry/cronjobs\" range_end:\"/registry/cronjobt\" count_only:true " with result "range_response_count:0 size:5" took too long (10.716506063s) to execute
* 2020-07-24 22:12:24.692307 W | etcdserver: read-only range request "key:\"/registry/leases\" range_end:\"/registry/leaset\" count_only:true " with result "range_response_count:0 size:7" took too long (5.06996709s) to execute
* 2020-07-24 22:12:24.692370 W | etcdserver: read-only range request "key:\"/registry/validatingwebhookconfigurations\" range_end:\"/registry/validatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (1.000509241s) to execute
* 2020-07-24 22:12:24.692411 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.750928453s) to execute
* 2020-07-24 22:12:24.692486 W | etcdserver: read-only range request "key:\"/registry/resourcequotas\" range_end:\"/registry/resourcequotat\" count_only:true " with result "range_response_count:0 size:5" took too long (467.624109ms) to execute
* 2020-07-24 22:12:24.692637 W | etcdserver: read-only range request "key:\"/registry/apiextensions.k8s.io/customresourcedefinitions\" range_end:\"/registry/apiextensions.k8s.io/customresourcedefinitiont\" count_only:true " with result "range_response_count:0 size:5" took too long (276.829886ms) to execute
* 2020-07-24 22:12:24.851469 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-apiserver-enable-default-cni-20200724220253-14997.1624cf8510bad08b\" " with result "range_response_count:1 size:918" took too long (116.217054ms) to execute
* 2020-07-24 22:12:24.985401 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:287" took too long (132.056052ms) to execute
*
* ==> kernel <==
* 22:12:27 up 39 min, 0 users, load average: 15.89, 12.78, 8.35
* Linux enable-default-cni-20200724220253-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [c79c87144073] <==
* I0724 22:10:43.263604 1 trace.go:116] Trace[992564654]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:10:41.942511571 +0000 UTC m=+341.904242636) (total time: 1.32105801s):
* Trace[992564654]: [1.321036608s] [1.319305788s] Transaction committed
* I0724 22:11:05.145438 1 trace.go:116] Trace[1916484810]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:11:01.942736708 +0000 UTC m=+361.904467673) (total time: 3.20266048s):
* Trace[1916484810]: [3.202632978s] [3.201237381s] Transaction committed
* I0724 22:11:05.145541 1 trace.go:116] Trace[1596447611]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:11:03.276708017 +0000 UTC m=+363.238438982) (total time: 1.868798779s):
* Trace[1596447611]: [1.868798779s] [1.868798779s] END
* I0724 22:11:05.145634 1 trace.go:116] Trace[829840988]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.10 (started: 2020-07-24 22:11:03.276683116 +0000 UTC m=+363.238414181) (total time: 1.868924587s):
* Trace[829840988]: [1.868870783s] [1.868851682s] Listing from storage done
* I0724 22:12:18.945752 1 trace.go:116] Trace[2019115575]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:12:11.943386245 +0000 UTC m=+431.905117310) (total time: 7.002333394s):
* Trace[2019115575]: [7.002333394s] [7.000770188s] END
* E0724 22:12:18.945778 1 controller.go:223] unable to sync kubernetes service: etcdserver: request timed out
* I0724 22:12:24.692008 1 trace.go:116] Trace[1922272323]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:12:15.186099738 +0000 UTC m=+435.147830703) (total time: 9.505866422s):
* Trace[1922272323]: [9.505866422s] [9.505866422s] END
* I0724 22:12:24.692127 1 trace.go:116] Trace[1409544902]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.10 (started: 2020-07-24 22:12:15.186077736 +0000 UTC m=+435.147808701) (total time: 9.506019133s):
* Trace[1409544902]: [9.505959829s] [9.505944128s] Listing from storage done
* I0724 22:12:24.692913 1 trace.go:116] Trace[2102601930]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:12:21.940975103 +0000 UTC m=+441.902706168) (total time: 2.75190592s):
* Trace[2102601930]: [2.751868318s] [2.751860818s] About to write a response
* I0724 22:12:24.693752 1 trace.go:116] Trace[563901383]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:12:19.251357548 +0000 UTC m=+439.213088513) (total time: 5.442371034s):
* Trace[563901383]: [5.442346032s] [5.441919102s] Transaction committed
* I0724 22:12:24.693838 1 trace.go:116] Trace[2029761124]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/enable-default-cni-20200724220253-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.10 (started: 2020-07-24 22:12:19.251203038 +0000 UTC m=+439.212934103) (total time: 5.44261345s):
* Trace[2029761124]: [5.442568947s] [5.442475041s] Object stored in database
* I0724 22:12:24.732673 1 trace.go:116] Trace[175889892]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:12:15.509527582 +0000 UTC m=+435.471258647) (total time: 9.223112097s):
* Trace[175889892]: [9.183661163s] [9.183661163s] initial value restored
* I0724 22:12:24.732857 1 trace.go:116] Trace[1581683246]: "Patch" url:/api/v1/namespaces/kube-system/events/etcd-enable-default-cni-20200724220253-14997.1624cf8463855eee,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.10 (started: 2020-07-24 22:12:15.509433776 +0000 UTC m=+435.471164741) (total time: 9.223395316s):
* Trace[1581683246]: [9.183757669s] [9.183722567s] About to apply patch
*
* ==> kube-controller-manager [521ff621df18] <==
* I0724 22:05:01.419041 1 serving.go:313] Generated self-signed cert in-memory
* I0724 22:05:01.837353 1 controllermanager.go:161] Version: v1.18.3
* I0724 22:05:01.838217 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt
* I0724 22:05:01.838226 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt
* I0724 22:05:01.838553 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257
* I0724 22:05:01.838647 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* I0724 22:05:01.839260 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252
* F0724 22:05:19.372490 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: an error on the server ("[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/start-kube-apiserver-admission-initializer ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/start-apiextensions-informers ok\n[+]poststarthook/start-apiextensions-controllers ok\n[+]poststarthook/crd-informer-synced ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/start-cluster-authentication-info-controller ok\n[+]poststarthook/start-kube-aggregator-informers ok\n[+]poststarthook/apiservice-registration-controller ok\n[+]poststarthook/apiservice-status-available-controller ok\n[+]poststarthook/kube-apiserver-autoregistration ok\n[+]autoregister-completion ok\n[+]poststarthook/apiservice-openapi-controller ok\nhealthz check failed") has prevented the request from succeeding
*
* ==> kube-controller-manager [89fbef5a19ca] <==
* I0724 22:05:42.238372 1 range_allocator.go:373] Set node enable-default-cni-20200724220253-14997 PodCIDR to [10.244.0.0/24]
* I0724 22:05:42.240029 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"1c27f0f4-f89b-477c-bfa8-60448cc8880c", APIVersion:"apps/v1", ResourceVersion:"194", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-66bff467f8 to 2
* I0724 22:05:42.243714 1 shared_informer.go:230] Caches are synced for endpoint_slice
* I0724 22:05:42.259039 1 shared_informer.go:230] Caches are synced for bootstrap_signer
* E0724 22:05:42.368022 1 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
* I0724 22:05:42.368262 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"0997b900-5804-46c5-bb77-b891b37b5cf2", APIVersion:"apps/v1", ResourceVersion:"320", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-ht42g
* I0724 22:05:42.368299 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"643a251a-4728-46ec-a343-aea9658354bd", APIVersion:"apps/v1", ResourceVersion:"199", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-kjj4g
* I0724 22:05:42.390874 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"0997b900-5804-46c5-bb77-b891b37b5cf2", APIVersion:"apps/v1", ResourceVersion:"320", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-4j5vc
* I0724 22:05:42.433265 1 shared_informer.go:230] Caches are synced for service account
* I0724 22:05:42.496425 1 shared_informer.go:230] Caches are synced for namespace
* I0724 22:05:42.677177 1 shared_informer.go:223] Waiting for caches to sync for resource quota
* I0724 22:05:42.738896 1 shared_informer.go:230] Caches are synced for garbage collector
* I0724 22:05:42.738923 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
* I0724 22:05:42.770513 1 shared_informer.go:230] Caches are synced for stateful set
* I0724 22:05:42.777014 1 shared_informer.go:230] Caches are synced for PVC protection
* I0724 22:05:42.777484 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:05:42.781587 1 shared_informer.go:230] Caches are synced for attach detach
* I0724 22:05:42.800483 1 shared_informer.go:230] Caches are synced for expand
* I0724 22:05:42.827233 1 shared_informer.go:230] Caches are synced for persistent volume
* I0724 22:05:42.829412 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:05:42.832238 1 shared_informer.go:230] Caches are synced for garbage collector
* I0724 22:05:43.315422 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"1c27f0f4-f89b-477c-bfa8-60448cc8880c", APIVersion:"apps/v1", ResourceVersion:"366", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1
* I0724 22:05:43.409722 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"0997b900-5804-46c5-bb77-b891b37b5cf2", APIVersion:"apps/v1", ResourceVersion:"367", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-ht42g
* I0724 22:05:48.977313 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"netcat", UID:"25101045-309e-46bf-9c23-951c0d544988", APIVersion:"apps/v1", ResourceVersion:"417", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set netcat-7987c4c66b to 1
* I0724 22:05:49.005560 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"netcat-7987c4c66b", UID:"ff7d6353-1bd7-48b2-828f-3d51b6992329", APIVersion:"apps/v1", ResourceVersion:"418", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: netcat-7987c4c66b-wk7g2
*
* ==> kube-proxy [2aa5f49cb2b7] <==
* W0724 22:05:44.892017 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy
* I0724 22:05:44.928156 1 node.go:136] Successfully retrieved node IP: 172.17.0.10
* I0724 22:05:44.928207 1 server_others.go:186] Using iptables Proxier.
* I0724 22:05:44.929039 1 server.go:583] Version: v1.18.3
* I0724 22:05:44.929913 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:05:44.930389 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:05:44.930637 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:05:44.930702 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:05:44.930878 1 config.go:315] Starting service config controller
* I0724 22:05:44.930890 1 shared_informer.go:223] Waiting for caches to sync for service config
* I0724 22:05:44.930934 1 config.go:133] Starting endpoints config controller
* I0724 22:05:44.930950 1 shared_informer.go:223] Waiting for caches to sync for endpoints config
* I0724 22:05:45.031024 1 shared_informer.go:230] Caches are synced for service config
* I0724 22:05:45.031031 1 shared_informer.go:230] Caches are synced for endpoints config
*
* ==> kube-scheduler [d377059ae6fd] <==
* I0724 22:05:09.241802 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251
* I0724 22:05:09.243081 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I0724 22:05:09.243122 1 shared_informer.go:223] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I0724 22:05:09.243952 1 secure_serving.go:178] Serving securely on 127.0.0.1:10259
* I0724 22:05:09.243989 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* E0724 22:05:09.245623 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:05:09.245716 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E0724 22:05:09.245745 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* E0724 22:05:09.245747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* E0724 22:05:09.245838 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:05:09.245847 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E0724 22:05:09.246212 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:05:09.246628 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:05:09.248877 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:05:10.120680 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:05:10.293526 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:05:10.307592 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E0724 22:05:10.325504 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:05:10.391599 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:05:10.527368 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* E0724 22:05:10.670173 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:05:10.674411 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E0724 22:05:10.791191 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* E0724 22:05:12.445096 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* I0724 22:05:16.643298 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Fri 2020-07-24 22:04:31 UTC, end at Fri 2020-07-24 22:12:27 UTC. --
* Jul 24 22:10:02 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:10:02.149095 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"
* Jul 24 22:10:15 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:10:15.148649 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d
* Jul 24 22:10:15 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:10:15.148923 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"
* Jul 24 22:10:27 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:10:27.148646 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d
* Jul 24 22:10:27 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:10:27.148939 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"
* Jul 24 22:10:42 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:10:42.148610 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d
* Jul 24 22:10:42 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:10:42.148859 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"
* Jul 24 22:10:55 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:10:55.148589 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d
* Jul 24 22:10:55 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:10:55.148842 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"
* Jul 24 22:11:09 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:11:09.148684 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID:
4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d * Jul 24 22:11:09 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:11:09.148955 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * Jul 24 22:11:24 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:11:24.148788 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d * Jul 24 22:11:24 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:11:24.149008 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * Jul 24 22:11:36 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:11:36.148759 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d * Jul 24 22:11:36 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:11:36.891708 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 4e79d3ccece167165b595b3c17dde600add776f9b6fb486fa28b39efb947b92d * Jul 24 22:11:36 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:11:36.892010 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396 * Jul 24 22:11:36 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:11:36.892271 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * Jul 24 22:11:49 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:11:49.148732 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396 * Jul 24 22:11:49 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:11:49.149102 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * Jul 24 22:12:00 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:12:00.148671 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396 * Jul 24 22:12:00 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:12:00.148907 2487 pod_workers.go:191] Error syncing pod 
d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * Jul 24 22:12:11 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:12:11.148636 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396 * Jul 24 22:12:11 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:12:11.148889 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * Jul 24 22:12:23 enable-default-cni-20200724220253-14997 kubelet[2487]: I0724 22:12:23.148611 2487 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 38176139cff667ee890a13e52649f4c0d2385012d1af2900a7d96277042b0396 * Jul 24 22:12:23 enable-default-cni-20200724220253-14997 kubelet[2487]: E0724 22:12:23.148857 2487 pod_workers.go:191] Error syncing pod d6acad28-ae23-4271-a37a-587f8c955d0f ("storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(d6acad28-ae23-4271-a37a-587f8c955d0f)" * * ==> storage-provisioner [38176139cff6] <== * F0724 22:11:36.589551 1 main.go:37] Error getting server version: the server has asked for the client to provide credentials -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p enable-default-cni-20200724220253-14997 -n enable-default-cni-20200724220253-14997 helpers_test.go:254: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/enable-default-cni]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context enable-default-cni-20200724220253-14997 describe pod helpers_test.go:265: (dbg) Non-zero exit: kubectl --context enable-default-cni-20200724220253-14997 describe pod : exit status 1 (72.410618ms) ** stderr ** error: resource name may not be empty ** /stderr ** helpers_test.go:267: kubectl --context enable-default-cni-20200724220253-14997 describe pod : exit status 1 helpers_test.go:170: Cleaning up "enable-default-cni-20200724220253-14997" profile ... 
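
[annotation] The `describe pod : exit status 1` above is an artifact of the post-mortem itself, not of the cluster: the field-selector query at helpers_test.go:254 returned no non-running pods, so the follow-up `kubectl describe pod` ran with no resource names and failed by design ("resource name may not be empty"). A minimal sketch of a guard in the same exec-driven style as the harness — the function name and the reuse of the profile name below are illustrative, not from the suite:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    // describeNonRunningPods mirrors the post-mortem steps above: list pod
    // names whose phase is not Running, and only run `kubectl describe pod`
    // when that list is non-empty, avoiding the guaranteed "resource name
    // may not be empty" failure. Like the harness, it passes bare pod names;
    // pods outside the default namespace would also need their namespace.
    func describeNonRunningPods(kubeContext string) error {
    	out, err := exec.Command("kubectl", "--context", kubeContext,
    		"get", "po", "-o=jsonpath={.items[*].metadata.name}", "-A",
    		"--field-selector=status.phase!=Running").Output()
    	if err != nil {
    		return fmt.Errorf("listing non-running pods: %w", err)
    	}
    	names := strings.Fields(string(out))
    	if len(names) == 0 {
    		fmt.Println("no non-running pods; skipping describe")
    		return nil
    	}
    	args := append([]string{"--context", kubeContext, "describe", "pod"}, names...)
    	desc, err := exec.Command("kubectl", args...).CombinedOutput()
    	fmt.Print(string(desc))
    	return err
    }

    func main() {
    	if err := describeNonRunningPods("enable-default-cni-20200724220253-14997"); err != nil {
    		fmt.Println(err)
    	}
    }

The point of the guard is only to keep a clean post-mortem from ending on a spurious non-zero exit; the storage-provisioner credentials error above is the real signal in this block.
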
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p enable-default-cni-20200724220253-14997 === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Run: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default === CONT TestNetworkPlugins/group/enable-default-cni helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p enable-default-cni-20200724220253-14997: (5.680600314s) === CONT TestStartStop/group/newest-cni === RUN TestStartStop/group/newest-cni/serial === RUN TestStartStop/group/newest-cni/serial/FirstStart start_stop_delete_test.go:149: (dbg) Run: ./minikube-linux-amd64 start -p newest-cni-20200724221234-14997 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubelet.network-plugin=cni --extra-config=kubeadm.pod-network-cidr=192.168.111.111/16 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.4-rc.0 === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.262755237s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** === CONT TestNetworkPlugins/group/kubenet/DNS net_test.go:156: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (26.393349633s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:162: failed to do nslookup on kubernetes.default: exit status 1 net_test.go:167: failed nslookup: got=";; connection timed out; no servers could be reached\n\n", want=*"10.96.0.1"* === CONT TestNetworkPlugins/group/kubenet net_test.go:204: "kubenet" test finished in 12m48.872596382s, failed=true net_test.go:205: *** TestNetworkPlugins/group/kubenet FAILED at 2020-07-24 22:12:57.047328545 +0000 UTC m=+2207.171692930 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestNetworkPlugins/group/kubenet]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect kubenet-20200724220343-14997 helpers_test.go:228: (dbg) docker inspect kubenet-20200724220343-14997: -- stdout -- [ { "Id": "abf0086af4703ba5c4aa8e52a63cc57116576ad58dc31b52f2300e0f4c61e5f6", "Created": "2020-07-24T22:05:08.70573589Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 163135, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:05:11.159006588Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/abf0086af4703ba5c4aa8e52a63cc57116576ad58dc31b52f2300e0f4c61e5f6/resolv.conf", "HostnamePath": "/var/lib/docker/containers/abf0086af4703ba5c4aa8e52a63cc57116576ad58dc31b52f2300e0f4c61e5f6/hostname", "HostsPath": 
"/var/lib/docker/containers/abf0086af4703ba5c4aa8e52a63cc57116576ad58dc31b52f2300e0f4c61e5f6/hosts", "LogPath": "/var/lib/docker/containers/abf0086af4703ba5c4aa8e52a63cc57116576ad58dc31b52f2300e0f4c61e5f6/abf0086af4703ba5c4aa8e52a63cc57116576ad58dc31b52f2300e0f4c61e5f6-json.log", "Name": "/kubenet-20200724220343-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "kubenet-20200724220343-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/6c7d2af194df54d6d9c116fad5c3ac7abd260b35cd180b46a9c938c4ea2b81ee-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/6c7d2af194df54d6d9c116fad5c3ac7abd260b35cd180b46a9c938c4ea2b81ee/merged", "UpperDir": "/var/lib/docker/overlay2/6c7d2af194df54d6d9c116fad5c3ac7abd260b35cd180b46a9c938c4ea2b81ee/diff", "WorkDir": "/var/lib/docker/overlay2/6c7d2af194df54d6d9c116fad5c3ac7abd260b35cd180b46a9c938c4ea2b81ee/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "kubenet-20200724220343-14997", "Source": "/var/lib/docker/volumes/kubenet-20200724220343-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "kubenet-20200724220343-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { 
"created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "kubenet-20200724220343-14997", "name.minikube.sigs.k8s.io": "kubenet-20200724220343-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "16ee5cb5ab1579c4cb51285de0a961128408fa1b6ed228f5cd5f6e059f0584da", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32876" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32875" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32874" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32873" } ] }, "SandboxKey": "/var/run/docker/netns/16ee5cb5ab15", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "28cbbd254d720afcc9cab040adbbca51bd16ebb1d009bfeb926134081b81ad88", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.6", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:06", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "28cbbd254d720afcc9cab040adbbca51bd16ebb1d009bfeb926134081b81ad88", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.6", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:06", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p kubenet-20200724220343-14997 -n kubenet-20200724220343-14997 helpers_test.go:237: <<< TestNetworkPlugins/group/kubenet FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/kubenet]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p kubenet-20200724220343-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p kubenet-20200724220343-14997 logs -n 25: (2.641026634s) helpers_test.go:245: TestNetworkPlugins/group/kubenet logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:05:12 UTC, end at Fri 2020-07-24 22:12:58 UTC. -- * Jul 24 22:05:24 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:24.003630324Z" level=warning msg="Your kernel does not support cgroup blkio weight_device" * Jul 24 22:05:24 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:24.003776934Z" level=info msg="Loading containers: start." * Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.034512898Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.18.0.0/16. Daemon option --bip can be used to set a preferred IP address" * Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.167433145Z" level=info msg="Loading containers: done." 
* Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.202098356Z" level=warning msg="Not using native diff for overlay2, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" storage-driver=overlay2 * Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.202372775Z" level=info msg="Docker daemon" commit=6a30dfca03 graphdriver(s)=overlay2 version=19.03.2 * Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.202437980Z" level=info msg="Daemon has completed initialization" * Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.251280078Z" level=info msg="API listen on [::]:2376" * Jul 24 22:05:25 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:05:25.251283578Z" level=info msg="API listen on /var/run/docker.sock" * Jul 24 22:05:25 kubenet-20200724220343-14997 systemd[1]: Started Docker Application Container Engine. * Jul 24 22:06:08 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:08.362686321Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:06:08 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:08.362825230Z" level=warning msg="22266b9d780455a0b8c6231ca399e8a4a059eba624f51349e5d2218ddf85f4f0 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/22266b9d780455a0b8c6231ca399e8a4a059eba624f51349e5d2218ddf85f4f0/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:06:09 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:09.219515827Z" level=warning msg="Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap." 
* Jul 24 22:06:09 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:09.435449523Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:06:09 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:09.435548030Z" level=warning msg="055375f6a7033b69a09c98eea5de100c9d01d49483416af2ef50a845e6a214a5 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/055375f6a7033b69a09c98eea5de100c9d01d49483416af2ef50a845e6a214a5/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:06:28 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:28.809765460Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:06:28 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:06:28.809884769Z" level=warning msg="1a0c1821c43cadd12bc7fbdf55979f785c9205de732546321fa29550ab629e6e cleanup: failed to unmount IPC: umount /var/lib/docker/containers/1a0c1821c43cadd12bc7fbdf55979f785c9205de732546321fa29550ab629e6e/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:07:02 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:07:02.969460403Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:07:02 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:07:02.969594712Z" level=warning msg="5f33b8ad4262e7c53c15f8c68dce3a1c7968457dfcd80a76ab4a470dcb420c6b cleanup: failed to unmount IPC: umount /var/lib/docker/containers/5f33b8ad4262e7c53c15f8c68dce3a1c7968457dfcd80a76ab4a470dcb420c6b/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:07:47 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:07:47.683998533Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:07:47 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:07:47.684106440Z" level=warning msg="3dd1ab15a4639ff73ea6e6786762edcb9d8044f96f9f5e20caf270dd20ed83d4 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/3dd1ab15a4639ff73ea6e6786762edcb9d8044f96f9f5e20caf270dd20ed83d4/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:09:19 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:09:19.846561737Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:09:19 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:09:19.846662043Z" level=warning msg="c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:12:10 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:12:10.960910301Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:12:10 kubenet-20200724220343-14997 dockerd[360]: time="2020-07-24T22:12:10.961101714Z" level=warning msg="d8c25e06603f099d66058f5c3d185d247bb8227ba4aa620193fe7a63196deb80 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/d8c25e06603f099d66058f5c3d185d247bb8227ba4aa620193fe7a63196deb80/mounts/shm, flags: 0x2: no such file or directory" * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 
d8c25e06603f0 4689081edb103 52 seconds ago Exited storage-provisioner 6 badc4b2bb3c6c * 3b23909a59c35 gcr.io/kubernetes-e2e-test-images/dnsutils@sha256:b31bcf7ef4420ce7108e7fc10b6c00343b21257c945eec94c21598e72a8f2de0 6 minutes ago Running dnsutils 0 44a109182dbd5 * 8b0dad6396099 67da37a9a360e 6 minutes ago Running coredns 0 2737c0e1142a1 * 7de23fa32b48c 3439b7546f29b 6 minutes ago Running kube-proxy 0 ee12400fe2001 * 024d615be9b0e da26705ccb4b5 7 minutes ago Running kube-controller-manager 0 82740b2170ee3 * e8a0b4b34ce5d 76216c34ed0c7 7 minutes ago Running kube-scheduler 0 f3e55e718f8ee * e70cd3f2f394b 303ce5db0e90d 7 minutes ago Running etcd 0 efa4d946b611a * 8634ec22fffce 7e28efa976bd1 7 minutes ago Running kube-apiserver 0 7250f6df21451 * * ==> coredns [8b0dad639609] <== * E0724 22:12:50.626477 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:51.626248 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:51.627057 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:51.627992 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:52.627936 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:52.628517 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:52.630106 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:53.629582 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:53.630339 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:53.631366 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:54.631146 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:54.632030 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:54.633257 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:55.632545 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:55.633555 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:55.634550 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:56.634320 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:56.636279 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:56.636286 1 reflector.go:153] 
pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:57.637661 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:57.637920 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:57.639832 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * E0724 22:12:58.639381 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Unauthorized * E0724 22:12:58.640463 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Unauthorized * E0724 22:12:58.641131 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Unauthorized * * ==> describe nodes <== * Name: kubenet-20200724220343-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=kubenet-20200724220343-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=kubenet-20200724220343-14997 * minikube.k8s.io/updated_at=2020_07_24T22_05_54_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:05:49 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: kubenet-20200724220343-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:12:55 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:11:26 +0000 Fri, 24 Jul 2020 22:05:45 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:11:26 +0000 Fri, 24 Jul 2020 22:05:45 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:11:26 +0000 Fri, 24 Jul 2020 22:05:45 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:11:26 +0000 Fri, 24 Jul 2020 22:06:05 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.6 * Hostname: kubenet-20200724220343-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 9167d002dfbf4623b935fae071e84712 * System UUID: 52b60d2f-ca53-44da-8591-9687ce0bb523 * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: docker://19.3.2 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * Non-terminated Pods: (8 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default netcat-7987c4c66b-mgq44 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m47s * 
kube-system coredns-66bff467f8-vhh67 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 6m59s * kube-system etcd-kubenet-20200724220343-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 7m3s * kube-system kube-apiserver-kubenet-20200724220343-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 7m3s * kube-system kube-controller-manager-kubenet-20200724220343-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 7m3s * kube-system kube-proxy-7ngbk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m58s * kube-system kube-scheduler-kubenet-20200724220343-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 7m3s * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m57s * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 650m (4%) 0 (0%) * memory 70Mi (0%) 170Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 7m22s (x6 over 7m25s) kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 7m22s (x6 over 7m25s) kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 7m22s (x6 over 7m25s) kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeHasSufficientPID * Normal Starting 7m4s kubelet, kubenet-20200724220343-14997 Starting kubelet. * Warning SystemOOM 7m4s kubelet, kubenet-20200724220343-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 7m4s kubelet, kubenet-20200724220343-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 7m3s kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 7m3s kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 7m3s kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 7m3s kubelet, kubenet-20200724220343-14997 Updated Node Allocatable limit across pods * Warning readOnlySysFS 6m57s kube-proxy, kubenet-20200724220343-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 6m57s kube-proxy, kubenet-20200724220343-14997 Starting kube-proxy. 
* Normal NodeReady 6m53s kubelet, kubenet-20200724220343-14997 Node kubenet-20200724220343-14997 status is now: NodeReady * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
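
[annotation] The readOnlySysFS warning in the node events above is the same condition kube-proxy reported at the top of this output ("sysfs is not writable: ... Opts:[ro nosuid nodev noexec relatime]"): inside the KIC container, /sys is mounted read-only, so conntrack limits cannot be adjusted there. A self-contained way to confirm how sysfs is mounted, reading only /proc/mounts (a sketch, not part of the suite; run it inside the node container):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strings"
    )

    // Print how sysfs is mounted at /sys, using only /proc/mounts. A
    // read-only mount here is what produces both the kube-proxy "sysfs is
    // not writable" error and the readOnlySysFS node event above.
    func main() {
    	f, err := os.Open("/proc/mounts")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	defer f.Close()

    	sc := bufio.NewScanner(f)
    	for sc.Scan() {
    		// Format per line: device mountpoint fstype options dump pass
    		fields := strings.Fields(sc.Text())
    		if len(fields) >= 4 && fields[1] == "/sys" {
    			ro := strings.Contains(","+fields[3]+",", ",ro,")
    			fmt.Printf("/sys mounted with opts %q (read-only: %v)\n", fields[3], ro)
    		}
    	}
    }
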
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [e70cd3f2f394] <== * 2020-07-24 22:12:34.080707 W | etcdserver: read-only range request "key:\"/registry/limitranges\" range_end:\"/registry/limitranget\" count_only:true " with result "range_response_count:0 size:5" took too long (286.013384ms) to execute * 2020-07-24 22:12:34.080823 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (812.393743ms) to execute * 2020-07-24 22:12:34.080924 W | etcdserver: read-only range request "key:\"/registry/storageclasses\" range_end:\"/registry/storageclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (342.839214ms) to execute * 2020-07-24 22:12:39.198737 W | wal: sync duration of 1.25519137s, expected less than 1s * 2020-07-24 22:12:39.214552 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (969.100812ms) to execute * 2020-07-24 22:12:39.214680 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (713.968466ms) to execute * 2020-07-24 22:12:39.214743 W | etcdserver: read-only range request "key:\"/registry/rolebindings\" range_end:\"/registry/rolebindingt\" count_only:true " with result "range_response_count:0 size:7" took too long (856.46736ms) to execute * 2020-07-24 22:12:42.651551 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:286" took too long (161.490828ms) to execute * 2020-07-24 22:12:49.813199 W | wal: sync duration of 2.002981489s, expected less than 1s * 2020-07-24 22:12:55.268502 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (2.000027952s) to execute * WARNING: 2020/07/24 22:12:55 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:12:55.904630 W | wal: sync duration of 5.353112123s, expected less than 1s * 2020-07-24 22:12:55.911765 W | etcdserver: read-only range request "key:\"/registry/poddisruptionbudgets\" range_end:\"/registry/poddisruptionbudgett\" count_only:true " with result "range_response_count:0 size:5" took too long (5.682374103s) to execute * 2020-07-24 22:12:55.911821 W | etcdserver: read-only range request "key:\"/registry/namespaces\" range_end:\"/registry/namespacet\" count_only:true " with result "range_response_count:0 size:7" took too long (6.051232382s) to execute * 2020-07-24 22:12:55.911860 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (5.709630785s) to execute * 2020-07-24 22:12:55.911940 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (5.124557612s) to execute * 2020-07-24 22:12:55.911974 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (3.448050044s) to execute * 2020-07-24 22:12:55.912094 W | etcdserver: read-only range request "key:\"/registry/secrets\" range_end:\"/registry/secrett\" count_only:true " with result 
"range_response_count:0 size:7" took too long (5.432373449s) to execute * 2020-07-24 22:12:55.912579 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (910.438409ms) to execute * 2020-07-24 22:12:55.912677 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-apiserver-kubenet-20200724220343-14997.1624cf924d833fb4\" " with result "range_response_count:1 size:874" took too long (642.737017ms) to execute * 2020-07-24 22:12:55.912697 W | etcdserver: read-only range request "key:\"/registry/events\" range_end:\"/registry/eventt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.850580215s) to execute * 2020-07-24 22:12:55.912730 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (2.198440886s) to execute * 2020-07-24 22:12:55.913238 W | etcdserver: read-only range request "key:\"/registry/events\" range_end:\"/registry/eventt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.348300022s) to execute * 2020-07-24 22:12:56.084674 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-public\" " with result "range_response_count:1 size:263" took too long (166.83483ms) to execute * 2020-07-24 22:12:56.084707 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:610" took too long (169.003154ms) to execute * * ==> kernel <== * 22:12:59 up 40 min, 0 users, load average: 18.68, 13.71, 8.82 * Linux kubenet-20200724220343-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [8634ec22fffc] <== * Trace[783151866]: [810.409766ms] [809.907131ms] Transaction committed * I0724 22:12:24.730940 1 trace.go:116] Trace[1042430822]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kubenet-20200724220343-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.6 (started: 2020-07-24 22:12:23.919895552 +0000 UTC m=+400.482387547) (total time: 811.009807ms): * Trace[1042430822]: [811.009807ms] [810.925802ms] END * I0724 22:12:24.995517 1 trace.go:116] Trace[112972661]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:12:22.462880873 +0000 UTC m=+399.025372968) (total time: 2.532602022s): * Trace[112972661]: [2.53256432s] [2.532554519s] About to write a response * I0724 22:12:25.008833 1 trace.go:116] Trace[216602233]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:12:21.554083789 +0000 UTC m=+398.116575784) (total time: 3.454714729s): * Trace[216602233]: [3.434715243s] [3.434715243s] initial value restored * I0724 22:12:25.008934 1 trace.go:116] Trace[1806041302]: "Patch" url:/api/v1/namespaces/kube-system/events/etcd-kubenet-20200724220343-14997.1624cf916ffef219,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.6 (started: 2020-07-24 22:12:21.553998083 +0000 UTC m=+398.116489978) (total time: 3.454908843s): * Trace[1806041302]: [3.434803449s] [3.434766946s] About to apply patch * I0724 22:12:55.912912 1 trace.go:116] Trace[1957702625]: "List etcd3" 
key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:12:55.001790293 +0000 UTC m=+431.564282288) (total time: 911.080946ms): * Trace[1957702625]: [911.080946ms] [911.080946ms] END * I0724 22:12:55.913018 1 trace.go:116] Trace[1189474643]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.6 (started: 2020-07-24 22:12:55.001765392 +0000 UTC m=+431.564257387) (total time: 911.223854ms): * Trace[1189474643]: [911.16635ms] [911.148449ms] Listing from storage done * I0724 22:12:55.914779 1 trace.go:116] Trace[58861739]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:12:52.463510517 +0000 UTC m=+429.026002512) (total time: 3.451241129s): * Trace[58861739]: [3.451201627s] [3.451192526s] About to write a response * I0724 22:12:55.915224 1 trace.go:116] Trace[1479090820]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:12:55.269541789 +0000 UTC m=+431.832033884) (total time: 645.655783ms): * Trace[1479090820]: [643.798477ms] [643.798477ms] initial value restored * I0724 22:12:55.915306 1 trace.go:116] Trace[1546993]: "Patch" url:/api/v1/namespaces/kube-system/events/kube-apiserver-kubenet-20200724220343-14997.1624cf924d833fb4,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.6 (started: 2020-07-24 22:12:55.269456084 +0000 UTC m=+431.831948079) (total time: 645.828593ms): * Trace[1546993]: [643.886682ms] [643.84968ms] About to apply patch * I0724 22:12:55.916717 1 trace.go:116] Trace[1088517548]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:12:55.016882756 +0000 UTC m=+431.579374651) (total time: 899.810001ms): * Trace[1088517548]: [899.7834ms] [899.323674ms] Transaction committed * I0724 22:12:55.916861 1 trace.go:116] Trace[1886296855]: "Get" url:/api/v1/namespaces/kube-system,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:12:50.201867773 +0000 UTC m=+426.764359668) (total time: 5.714937491s): * Trace[1886296855]: [5.714905089s] [5.714896788s] About to write a response * I0724 22:12:55.916882 1 trace.go:116] Trace[611334847]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kubenet-20200724220343-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.6 (started: 2020-07-24 22:12:55.016736747 +0000 UTC m=+431.579228742) (total time: 900.12032ms): * Trace[611334847]: [900.016914ms] [899.924108ms] Object stored in database * * ==> kube-controller-manager [024d615be9b0] <== * I0724 22:06:00.159390 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:06:00.159439 1 taint_manager.go:187] Starting NoExecuteTaintManager * W0724 22:06:00.159473 1 node_lifecycle_controller.go:1048] Missing timestamp for Node kubenet-20200724220343-14997. Assuming now as a timestamp. * I0724 22:06:00.159521 1 node_lifecycle_controller.go:1199] Controller detected that all Nodes are not-Ready. Entering master disruption mode. 
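
[annotation] The etcd section above is dominated by slow-disk symptoms — "wal: sync duration of 5.353112123s, expected less than 1s" and read-only range requests taking multiple seconds — which surface again as the multi-second traces in the kube-apiserver section. On a CI host running many profiles in parallel this usually means the volume backing /var cannot sync quickly. A rough standalone probe of raw write+sync latency (a sketch; it assumes the temp directory sits on the same volume you care about, which may not hold):

    package main

    import (
    	"fmt"
    	"os"
    	"time"
    )

    // Measure write+sync latency a few times. etcd's WAL warns when a
    // sync exceeds 1s; latencies anywhere near that here point at the
    // disk or its contention rather than at etcd itself.
    func main() {
    	f, err := os.CreateTemp("", "fsync-probe-*")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	defer os.Remove(f.Name())
    	defer f.Close()

    	buf := make([]byte, 8*1024) // small, WAL-entry-sized write
    	for i := 0; i < 5; i++ {
    		start := time.Now()
    		if _, err := f.Write(buf); err != nil {
    			fmt.Fprintln(os.Stderr, err)
    			os.Exit(1)
    		}
    		if err := f.Sync(); err != nil { // flush to stable storage
    			fmt.Fprintln(os.Stderr, err)
    			os.Exit(1)
    		}
    		fmt.Printf("write+sync %d: %v\n", i+1, time.Since(start))
    	}
    }

The load average of 18.68 in the kernel section above, against 16 cores, is consistent with this being contention rather than a faulty disk.
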
* I0724 22:06:00.159570 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"kubenet-20200724220343-14997", UID:"54950114-a1ac-4eaa-acde-3b9f6b53b85f", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node kubenet-20200724220343-14997 event: Registered Node kubenet-20200724220343-14997 in Controller * I0724 22:06:00.174319 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:06:00.196994 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"b6df44c4-3936-4153-9684-29f2fbcd54d5", APIVersion:"apps/v1", ResourceVersion:"226", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-7ngbk * I0724 22:06:00.204297 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:06:00.204822 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:06:00.211260 1 shared_informer.go:230] Caches are synced for namespace * E0724 22:06:00.241053 1 daemon_controller.go:292] kube-system/kube-proxy failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy", GenerateName:"", Namespace:"kube-system", SelfLink:"/apis/apps/v1/namespaces/kube-system/daemonsets/kube-proxy", UID:"b6df44c4-3936-4153-9684-29f2fbcd54d5", ResourceVersion:"226", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63731225154, loc:(*time.Location)(0x6d09200)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"1"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kubeadm", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc0016817e0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc001681820)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc001681840), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-proxy", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), 
ConfigMap:(*v1.ConfigMapVolumeSource)(0xc0016e1fc0), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}, v1.Volume{Name:"xtables-lock", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc001681860), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}, v1.Volume{Name:"lib-modules", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc001681880), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kube-proxy", Image:"k8s.gcr.io/kube-proxy:v1.18.3", Command:[]string{"/usr/local/bin/kube-proxy", "--config=/var/lib/kube-proxy/config.conf", "--hostname-override=$(NODE_NAME)"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"NODE_NAME", Value:"", ValueFrom:(*v1.EnvVarSource)(0xc0016818c0)}}, 
Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-proxy", ReadOnly:false, MountPath:"/var/lib/kube-proxy", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"xtables-lock", ReadOnly:false, MountPath:"/run/xtables.lock", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"lib-modules", ReadOnly:true, MountPath:"/lib/modules", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(0xc0016e3f40), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0016f9fc8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string{"kubernetes.io/os":"linux"}, ServiceAccountName:"kube-proxy", DeprecatedServiceAccount:"kube-proxy", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc00042e460), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"system-node-critical", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc001b3c350)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc000a88018)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "kube-proxy": the object has been modified; please apply your changes to the latest version and try again * I0724 22:06:00.247714 1 shared_informer.go:230] Caches are synced for service account * I0724 22:06:00.308421 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:06:00.308449 1 disruption.go:339] Sending events to api server. * I0724 22:06:00.357197 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:06:00.360050 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:06:00.360061 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage
* I0724 22:06:00.404622 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:06:00.757624 1 shared_informer.go:223] Waiting for caches to sync for garbage collector
* I0724 22:06:00.757681 1 shared_informer.go:230] Caches are synced for garbage collector
* I0724 22:06:00.772440 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"8875da88-7a4c-4e34-bbe5-9bc53c4cb1a8", APIVersion:"apps/v1", ResourceVersion:"350", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1
* I0724 22:06:00.856403 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"3560949e-c2eb-4de4-a896-4ceceabd8a06", APIVersion:"apps/v1", ResourceVersion:"351", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-mzwl8
* I0724 22:06:10.160032 1 node_lifecycle_controller.go:1226] Controller detected that some Nodes are Ready. Exiting master disruption mode.
* I0724 22:06:11.628209 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"netcat", UID:"15502920-4985-47bc-8a8f-e77e34607515", APIVersion:"apps/v1", ResourceVersion:"414", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set netcat-7987c4c66b to 1
* I0724 22:06:11.643190 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"netcat-7987c4c66b", UID:"94575eb2-f003-40dc-b34a-0176ae97ed5d", APIVersion:"apps/v1", ResourceVersion:"415", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: netcat-7987c4c66b-mgq44
*
* ==> kube-proxy [7de23fa32b48] <==
* W0724 22:06:01.827758 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy
* I0724 22:06:01.836138 1 node.go:136] Successfully retrieved node IP: 172.17.0.6
* I0724 22:06:01.836174 1 server_others.go:186] Using iptables Proxier.
* W0724 22:06:01.836182 1 server_others.go:436] detect-local-mode set to ClusterCIDR, but no cluster CIDR defined
* I0724 22:06:01.836187 1 server_others.go:447] detect-local-mode: ClusterCIDR , defaulting to no-op detect-local
* I0724 22:06:01.836579 1 server.go:583] Version: v1.18.3
* I0724 22:06:01.837194 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:06:01.837496 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:06:01.837716 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:06:01.837783 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:06:01.837934 1 config.go:315] Starting service config controller
* I0724 22:06:01.837954 1 shared_informer.go:223] Waiting for caches to sync for service config
* I0724 22:06:01.838033 1 config.go:133] Starting endpoints config controller
* I0724 22:06:01.838067 1 shared_informer.go:223] Waiting for caches to sync for endpoints config
* I0724 22:06:01.938150 1 shared_informer.go:230] Caches are synced for service config
* I0724 22:06:01.938803 1 shared_informer.go:230] Caches are synced for endpoints config
*
* ==> kube-scheduler [e8a0b4b34ce5] <==
* I0724 22:05:49.943946 1 shared_informer.go:223] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* I0724 22:05:49.944082 1 secure_serving.go:178] Serving securely on 127.0.0.1:10259
* I0724 22:05:49.944102 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* E0724 22:05:49.949555 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
* E0724 22:05:49.949781 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
* E0724 22:05:49.949987 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:05:49.950008 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:05:49.950015 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:05:49.950128 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:05:49.950131 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
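The daemon_controller error earlier in this controller-manager log ("Operation cannot be fulfilled on daemonsets.apps "kube-proxy": the object has been modified; please apply your changes to the latest version and try again") is the apiserver's optimistic-concurrency check rejecting an update made with a stale resourceVersion; the controller requeues and succeeds on a later pass, so it is benign here. Client code normally handles this by re-reading the object and retrying the write. A minimal sketch using client-go's retry helper (the clientset wiring and the label mutation are hypothetical, not taken from this run):

    package retrydemo

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/util/retry"
    )

    func touchKubeProxyDaemonSet(cs kubernetes.Interface) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            // Fetch a fresh copy (and a fresh resourceVersion) on every attempt.
            ds, err := cs.AppsV1().DaemonSets("kube-system").Get(context.TODO(), "kube-proxy", metav1.GetOptions{})
            if err != nil {
                return err
            }
            ds.Labels["example.io/touched"] = "true" // hypothetical change
            _, err = cs.AppsV1().DaemonSets("kube-system").Update(context.TODO(), ds, metav1.UpdateOptions{})
            return err // a Conflict here makes RetryOnConflict loop with the newer object
        })
    }

RetryOnConflict re-runs the closure only when the returned error is a conflict, which is exactly the failure mode logged above.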
* E0724 22:05:49.950019 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:05:49.950182 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:05:50.840831 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:05:51.012853 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:05:51.014803 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:51.226195 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:05:51.227368 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:05:51.234230 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:05:51.348463 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:51.369916 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:05:51.450894 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * I0724 22:05:54.344507 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * E0724 22:05:59.785768 1 factory.go:503] pod: kube-system/coredns-66bff467f8-mzwl8 is already present in the active queue * E0724 22:05:59.836570 1 factory.go:503] pod: kube-system/coredns-66bff467f8-vhh67 is already present in the active queue * E0724 22:06:01.568787 1 factory.go:503] pod: kube-system/storage-provisioner is already present in unschedulable queue * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:05:12 UTC, end at Fri 
2020-07-24 22:12:59 UTC. -- * Jul 24 22:10:27 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:10:27.939570 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:10:38 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:10:38.938698 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:10:38 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:10:38.938984 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:10:52 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:10:52.938771 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:10:52 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:10:52.939345 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:11:05 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:11:05.938524 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:11:05 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:11:05.938756 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:11:18 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:11:18.938747 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:11:18 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:11:18.939085 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:11:29 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:11:29.938518 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:11:29 kubenet-20200724220343-14997 kubelet[2340]: 
E0724 22:11:29.938789 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:11:43 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:11:43.938574 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:11:43 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:11:43.938869 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:11:55 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:11:55.939301 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:11:55 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:11:55.939644 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:12:06 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:12:06.938593 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:12:23 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:12:23.919325 2340 controller.go:178] failed to update node lease, error: etcdserver: request timed out * Jul 24 22:12:24 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:12:24.732467 2340 controller.go:178] failed to update node lease, error: Operation cannot be fulfilled on leases.coordination.k8s.io "kubenet-20200724220343-14997": the object has been modified; please apply your changes to the latest version and try again * Jul 24 22:12:25 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:12:25.878603 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c6d41b97e124d4902c6fad5e7d46ac4500308a3475186522ff9a48838d1e8013 * Jul 24 22:12:25 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:12:25.878998 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d8c25e06603f099d66058f5c3d185d247bb8227ba4aa620193fe7a63196deb80 * Jul 24 22:12:25 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:12:25.879285 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:12:37 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:12:37.938615 2340 topology_manager.go:219] 
[topologymanager] RemoveContainer - Container ID: d8c25e06603f099d66058f5c3d185d247bb8227ba4aa620193fe7a63196deb80 * Jul 24 22:12:37 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:12:37.938894 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * Jul 24 22:12:52 kubenet-20200724220343-14997 kubelet[2340]: I0724 22:12:52.938538 2340 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d8c25e06603f099d66058f5c3d185d247bb8227ba4aa620193fe7a63196deb80 * Jul 24 22:12:52 kubenet-20200724220343-14997 kubelet[2340]: E0724 22:12:52.938782 2340 pod_workers.go:191] Error syncing pod 49ba8922-6fb9-48a9-850d-df654c38bd86 ("storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(49ba8922-6fb9-48a9-850d-df654c38bd86)" * * ==> storage-provisioner [d8c25e06603f] <== * F0724 22:12:10.856843 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: connection refused -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p kubenet-20200724220343-14997 -n kubenet-20200724220343-14997 helpers_test.go:254: (dbg) Run: kubectl --context kubenet-20200724220343-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/kubenet]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context kubenet-20200724220343-14997 describe pod helpers_test.go:265: (dbg) Non-zero exit: kubectl --context kubenet-20200724220343-14997 describe pod : exit status 1 (87.222497ms) ** stderr ** error: resource name may not be empty ** /stderr ** helpers_test.go:267: kubectl --context kubenet-20200724220343-14997 describe pod : exit status 1 helpers_test.go:170: Cleaning up "kubenet-20200724220343-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p kubenet-20200724220343-14997 === CONT TestStartStop/group/embed-certs/serial/SecondStart start_stop_delete_test.go:190: (dbg) Done: ./minikube-linux-amd64 start -p embed-certs-20200724221043-14997 --memory=2200 --alsologtostderr --wait=true --embed-certs --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3: (53.373256302s) start_stop_delete_test.go:196: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997 === RUN TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... === CONT TestNetworkPlugins/group/kubenet helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p kubenet-20200724220343-14997: (5.954821317s) === CONT TestFunctional helpers_test.go:170: Cleaning up "functional-20200724215019-14997" profile ... 
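The kubenet post-mortem above is a textbook crash loop: kubelet repeatedly logs RemoveContainer followed by "Error syncing pod ... CrashLoopBackOff", and the storage-provisioner log shows why the container keeps dying — it cannot reach the apiserver Service VIP ("Get https://10.96.0.1:443/version: ... connection refused"). The quoted delay grows from 2m40s to 5m0s because kubelet doubles the restart back-off on every crash until it hits a ceiling. A sketch of that schedule, assuming kubelet's default constants of a 10s base and a 5m cap (assumptions consistent with, but not read from, this log):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 10 * time.Second        // assumed initial back-off
        const maxDelay = 5 * time.Minute // assumed ceiling
        for attempt := 1; attempt <= 7; attempt++ {
            fmt.Printf("restart %d: back-off %v\n", attempt, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }

With those constants the fifth restart waits 2m40s and the sixth is clamped to 5m0s, matching the two back-off values quoted in the kubelet log.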
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p functional-20200724215019-14997 === CONT TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop helpers_test.go:332: "kubernetes-dashboard-6dbb54fd95-7swbz" [b08f666f-be9c-4b7c-990c-654a448dabd0] Pending helpers_test.go:332: "kubernetes-dashboard-6dbb54fd95-7swbz" [b08f666f-be9c-4b7c-990c-654a448dabd0] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) helpers_test.go:332: "kubernetes-dashboard-6dbb54fd95-7swbz" [b08f666f-be9c-4b7c-990c-654a448dabd0] Running === CONT TestFunctional helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p functional-20200724215019-14997: (8.752094085s) --- PASS: TestFunctional (851.67s) --- PASS: TestFunctional/serial (64.30s) --- PASS: TestFunctional/serial/CopySyncFile (0.00s) --- PASS: TestFunctional/serial/StartWithProxy (52.11s) --- PASS: TestFunctional/serial/SoftStart (3.47s) --- PASS: TestFunctional/serial/KubeContext (0.05s) --- PASS: TestFunctional/serial/KubectlGetPods (0.59s) --- PASS: TestFunctional/serial/CacheCmd (7.76s) --- PASS: TestFunctional/serial/CacheCmd/cache (7.76s) --- PASS: TestFunctional/serial/CacheCmd/cache/add (5.01s) --- PASS: TestFunctional/serial/CacheCmd/cache/delete_busybox:1.28.4-glibc (0.07s) --- PASS: TestFunctional/serial/CacheCmd/cache/list (0.06s) --- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.32s) --- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (2.16s) --- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.13s) --- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.34s) --- PASS: TestFunctional/parallel (0.00s) --- PASS: TestFunctional/parallel/ComponentHealth (0.10s) --- PASS: TestFunctional/parallel/PersistentVolumeClaim (12.46s) --- PASS: TestFunctional/parallel/NodeLabels (0.07s) --- PASS: TestFunctional/parallel/DockerEnv (2.67s) --- PASS: TestFunctional/parallel/UpdateContextCmd (4.69s) --- PASS: TestFunctional/parallel/FileSync (2.82s) --- PASS: TestFunctional/parallel/CertSync (7.75s) --- PASS: TestFunctional/parallel/SSHCmd (0.64s) --- PASS: TestFunctional/parallel/TunnelCmd (31.00s) --- PASS: TestFunctional/parallel/TunnelCmd/serial (31.00s) --- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s) --- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService (30.87s) --- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.09s) --- PASS: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.00s) --- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s) --- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s) --- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s) --- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.12s) --- PASS: TestFunctional/parallel/LogsCmd (6.88s) --- PASS: TestFunctional/parallel/AddonsCmd (0.27s) --- PASS: TestFunctional/parallel/MySQL (51.48s) --- PASS: TestFunctional/parallel/ProfileCmd (1.35s) --- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.53s) --- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.42s) --- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.39s) --- PASS: TestFunctional/parallel/DryRun (0.44s) --- PASS: TestFunctional/parallel/MountCmd (16.21s) --- PASS: TestFunctional/parallel/ServiceCmd (30.81s) --- PASS: TestFunctional/parallel/StatusCmd (1.22s) --- 
PASS: TestFunctional/parallel/ConfigCmd (0.47s) --- PASS: TestFunctional/parallel/DashboardCmd (16.74s) === CONT TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 14.011739915s === RUN TestStartStop/group/embed-certs/serial/AddonExistsAfterStop start_stop_delete_test.go:219: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... helpers_test.go:332: "kubernetes-dashboard-6dbb54fd95-7swbz" [b08f666f-be9c-4b7c-990c-654a448dabd0] Running start_stop_delete_test.go:219: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.006514048s === RUN TestStartStop/group/embed-certs/serial/VerifyKubernetesImages start_stop_delete_test.go:227: (dbg) Run: ./minikube-linux-amd64 ssh -p embed-certs-20200724221043-14997 "sudo crictl images -o json" start_stop_delete_test.go:227: Found non-minikube image: busybox:1.28.4-glibc === RUN TestStartStop/group/embed-certs/serial/Pause start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 pause -p embed-certs-20200724221043-14997 --alsologtostderr -v=1 start_stop_delete_test.go:233: (dbg) Done: ./minikube-linux-amd64 pause -p embed-certs-20200724221043-14997 --alsologtostderr -v=1: (1.04990268s) start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997 start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997: exit status 2 (414.588567ms) -- stdout -- Paused -- /stdout -- start_stop_delete_test.go:233: status error: exit status 2 (may be ok) start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997 start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997: exit status 2 (418.864641ms) -- stdout -- Stopped -- /stdout -- start_stop_delete_test.go:233: status error: exit status 2 (may be ok) start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 unpause -p embed-certs-20200724221043-14997 --alsologtostderr -v=1 start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997 start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-20200724221043-14997 -n embed-certs-20200724221043-14997 === CONT TestStartStop/group/embed-certs/serial start_stop_delete_test.go:126: (dbg) Run: ./minikube-linux-amd64 delete -p embed-certs-20200724221043-14997 start_stop_delete_test.go:126: (dbg) Done: ./minikube-linux-amd64 delete -p embed-certs-20200724221043-14997: (6.725690585s) start_stop_delete_test.go:131: (dbg) Run: kubectl config get-contexts embed-certs-20200724221043-14997 start_stop_delete_test.go:131: (dbg) Non-zero exit: kubectl config get-contexts embed-certs-20200724221043-14997: exit status 1 (58.178452ms) -- stdout -- CURRENT NAME CLUSTER AUTHINFO NAMESPACE -- /stdout -- ** stderr ** error: context embed-certs-20200724221043-14997 
not found ** /stderr ** start_stop_delete_test.go:133: config context error: exit status 1 (may be ok) === CONT TestStartStop/group/embed-certs helpers_test.go:170: Cleaning up "embed-certs-20200724221043-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p embed-certs-20200724221043-14997 === CONT TestStartStop/group/newest-cni/serial/FirstStart start_stop_delete_test.go:149: (dbg) Done: ./minikube-linux-amd64 start -p newest-cni-20200724221234-14997 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubelet.network-plugin=cni --extra-config=kubeadm.pod-network-cidr=192.168.111.111/16 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.4-rc.0: (1m17.225021611s) === RUN TestStartStop/group/newest-cni/serial/DeployApp === RUN TestStartStop/group/newest-cni/serial/Stop start_stop_delete_test.go:164: (dbg) Run: ./minikube-linux-amd64 stop -p newest-cni-20200724221234-14997 --alsologtostderr -v=3 start_stop_delete_test.go:164: (dbg) Done: ./minikube-linux-amd64 stop -p newest-cni-20200724221234-14997 --alsologtostderr -v=3: (2.376935006s) === RUN TestStartStop/group/newest-cni/serial/EnableAddonAfterStop start_stop_delete_test.go:174: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997 start_stop_delete_test.go:174: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997: exit status 7 (108.018376ms) -- stdout -- Stopped -- /stdout -- start_stop_delete_test.go:174: status error: exit status 7 (may be ok) start_stop_delete_test.go:181: (dbg) Run: ./minikube-linux-amd64 addons enable dashboard -p newest-cni-20200724221234-14997 === RUN TestStartStop/group/newest-cni/serial/SecondStart start_stop_delete_test.go:190: (dbg) Run: ./minikube-linux-amd64 start -p newest-cni-20200724221234-14997 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubelet.network-plugin=cni --extra-config=kubeadm.pod-network-cidr=192.168.111.111/16 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.4-rc.0 === CONT TestNetworkPlugins/group/bridge/DNS net_test.go:156: (dbg) Run: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default net_test.go:156: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 exec deployment/netcat -- nslookup kubernetes.default: exit status 1 (15.205980325s) -- stdout -- ;; connection timed out; no servers could be reached -- /stdout -- ** stderr ** command terminated with exit code 1 ** /stderr ** net_test.go:162: failed to do nslookup on kubernetes.default: exit status 1 net_test.go:167: failed nslookup: got=";; connection timed out; no servers could be reached\n\n", want=*"10.96.0.1"* === CONT TestNetworkPlugins/group/bridge net_test.go:204: "bridge" test finished in 14m21.588716472s, failed=true net_test.go:205: *** TestNetworkPlugins/group/bridge FAILED at 2020-07-24 22:14:29.763748754 +0000 UTC m=+2299.888113239 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestNetworkPlugins/group/bridge]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect bridge-20200724220503-14997 helpers_test.go:228: (dbg) 
docker inspect bridge-20200724220503-14997: -- stdout -- [ { "Id": "93c2faa15f3ecb684b3e4797685e0c183e509245f0e8f05cdf330f56580e3207", "Created": "2020-07-24T22:05:27.721039984Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 169307, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:05:30.907710063Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/93c2faa15f3ecb684b3e4797685e0c183e509245f0e8f05cdf330f56580e3207/resolv.conf", "HostnamePath": "/var/lib/docker/containers/93c2faa15f3ecb684b3e4797685e0c183e509245f0e8f05cdf330f56580e3207/hostname", "HostsPath": "/var/lib/docker/containers/93c2faa15f3ecb684b3e4797685e0c183e509245f0e8f05cdf330f56580e3207/hosts", "LogPath": "/var/lib/docker/containers/93c2faa15f3ecb684b3e4797685e0c183e509245f0e8f05cdf330f56580e3207/93c2faa15f3ecb684b3e4797685e0c183e509245f0e8f05cdf330f56580e3207-json.log", "Name": "/bridge-20200724220503-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "bridge-20200724220503-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/05c7a0fe169be18d6a7c415b8584e1a7fffdba9b236c235fecb6b4fca6dde923-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/05c7a0fe169be18d6a7c415b8584e1a7fffdba9b236c235fecb6b4fca6dde923/merged", "UpperDir": "/var/lib/docker/overlay2/05c7a0fe169be18d6a7c415b8584e1a7fffdba9b236c235fecb6b4fca6dde923/diff", "WorkDir": "/var/lib/docker/overlay2/05c7a0fe169be18d6a7c415b8584e1a7fffdba9b236c235fecb6b4fca6dde923/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "bridge-20200724220503-14997", "Source": "/var/lib/docker/volumes/bridge-20200724220503-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "bridge-20200724220503-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { 
"created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "bridge-20200724220503-14997", "name.minikube.sigs.k8s.io": "bridge-20200724220503-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "843878ceb6e0a84da23e08f106bb0a20fa0094e7d0f0ca101a9cee296aabb526", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32880" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32879" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32878" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32877" } ] }, "SandboxKey": "/var/run/docker/netns/843878ceb6e0", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "7a1682ad5b89ddc4c8b50560753733efda0b813227f82e88bd8dbba77f1d6219", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.12", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:0c", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "7a1682ad5b89ddc4c8b50560753733efda0b813227f82e88bd8dbba77f1d6219", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.12", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:0c", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p bridge-20200724220503-14997 -n bridge-20200724220503-14997 helpers_test.go:237: <<< TestNetworkPlugins/group/bridge FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/bridge]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p bridge-20200724220503-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p bridge-20200724220503-14997 logs -n 25: (3.289108104s) helpers_test.go:245: TestNetworkPlugins/group/bridge logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:05:39 UTC, end at Fri 2020-07-24 22:14:31 UTC. -- * Jul 24 22:05:42 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:42.755023198Z" level=warning msg="Your kernel does not support cgroup blkio weight_device" * Jul 24 22:05:42 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:42.755182709Z" level=info msg="Loading containers: start." * Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.111933938Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.18.0.0/16. Daemon option --bip can be used to set a preferred IP address" * Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.316555922Z" level=info msg="Loading containers: done." 
* Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.382821516Z" level=warning msg="Not using native diff for overlay2, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled" storage-driver=overlay2 * Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.383059132Z" level=info msg="Docker daemon" commit=6a30dfca03 graphdriver(s)=overlay2 version=19.03.2 * Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.383111536Z" level=info msg="Daemon has completed initialization" * Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.432409753Z" level=info msg="API listen on /var/run/docker.sock" * Jul 24 22:05:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:05:43.432452256Z" level=info msg="API listen on [::]:2376" * Jul 24 22:05:43 bridge-20200724220503-14997 systemd[1]: Started Docker Application Container Engine. * Jul 24 22:06:25 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:06:25.814901030Z" level=warning msg="Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap." * Jul 24 22:06:27 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:06:27.596133459Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:06:27 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:06:27.596285869Z" level=warning msg="c623428d93247a5a6e44fecdac1f25b4339dde9d77b61df30f7436776cf78482 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/c623428d93247a5a6e44fecdac1f25b4339dde9d77b61df30f7436776cf78482/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:07:07 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:07:07.886296717Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:07:07 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:07:07.886398524Z" level=warning msg="35e682a3f1f772cda79dfc6ca592535a46b960bf53b8339167bf14906a5595b1 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/35e682a3f1f772cda79dfc6ca592535a46b960bf53b8339167bf14906a5595b1/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:07:23 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:07:23.602566234Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:07:23 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:07:23.602706344Z" level=warning msg="e5464e6fc8ec65fa4929f08d646992a5e7a0641a11d65d3a786181a61c613dd8 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/e5464e6fc8ec65fa4929f08d646992a5e7a0641a11d65d3a786181a61c613dd8/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:07:51 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:07:51.242051997Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:07:51 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:07:51.242194807Z" level=warning msg="3eae8d5699ea125e77b9ebb06182c1566f45ca29f555d2fcb100f8fcfa194a74 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/3eae8d5699ea125e77b9ebb06182c1566f45ca29f555d2fcb100f8fcfa194a74/mounts/shm, flags: 0x2: no such file or directory" * Jul 
24 22:08:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:08:43.470166457Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:08:43 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:08:43.470288366Z" level=warning msg="915b4e0e72aa9f6ca517b3e9356207f34bd3c8a41cf328024c7765bfbc8cc5ed cleanup: failed to unmount IPC: umount /var/lib/docker/containers/915b4e0e72aa9f6ca517b3e9356207f34bd3c8a41cf328024c7765bfbc8cc5ed/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:10:12 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:10:12.007132680Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:10:12 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:10:12.007234587Z" level=warning msg="af05fa57ba6949e6224ec51d9ab9f0e538d11318e6b9dfb54da997c7174d727e cleanup: failed to unmount IPC: umount /var/lib/docker/containers/af05fa57ba6949e6224ec51d9ab9f0e538d11318e6b9dfb54da997c7174d727e/mounts/shm, flags: 0x2: no such file or directory"
* Jul 24 22:12:57 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:12:57.469297650Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* Jul 24 22:12:57 bridge-20200724220503-14997 dockerd[364]: time="2020-07-24T22:12:57.469402956Z" level=warning msg="d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d cleanup: failed to unmount IPC: umount /var/lib/docker/containers/d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d/mounts/shm, flags: 0x2: no such file or directory"
*
* ==> container status <==
* CONTAINER      IMAGE          CREATED             STATE    NAME                     ATTEMPT  POD ID
* d3027c300055e  4689081edb103  About a minute ago  Exited   storage-provisioner      6        9bf1e05a80d15
* e0ace24b0aac1  gcr.io/kubernetes-e2e-test-images/dnsutils@sha256:b31bcf7ef4420ce7108e7fc10b6c00343b21257c945eec94c21598e72a8f2de0  7 minutes ago  Running  dnsutils  0  36e82e4170857
* 94cb847d19572  67da37a9a360e  8 minutes ago       Running  coredns                  0        fc63051ede544
* 7e16b0926843d  3439b7546f29b  8 minutes ago       Running  kube-proxy               0        22bc4764e715e
* d2b3974bb07da  da26705ccb4b5  8 minutes ago       Running  kube-controller-manager  0        9dbb110062c11
* 000497d97038c  76216c34ed0c7  8 minutes ago       Running  kube-scheduler           0        97c22dc2a2f53
* d2b209fecb828  7e28efa976bd1  8 minutes ago       Running  kube-apiserver           0        c94e5a6288f3f
* 13e8d3b08e18b  303ce5db0e90d  8 minutes ago       Running  etcd                     0        a1abfd57addb0
*
* ==> coredns [94cb847d1957] <==
* I0724 22:13:57.465533 1 trace.go:116] Trace[460128162]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105 (started: 2020-07-24 22:13:27.464917158 +0000 UTC m=+421.297216922) (total time: 30.000600903s):
* Trace[460128162]: [30.000600903s] [30.000600903s] END
* E0724 22:13:57.465547 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: i/o timeout
* I0724 22:14:16.808753 1 trace.go:116] Trace[683024728]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105 (started: 2020-07-24 22:13:58.465710035 +0000 UTC m=+452.298009699) (total time: 18.343020086s):
* Trace[683024728]: [18.343020086s] [18.343020086s] END
* E0724 22:14:16.808781 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105:
Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * I0724 22:14:16.808756 1 trace.go:116] Trace[817455089]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105 (started: 2020-07-24 22:13:58.464529059 +0000 UTC m=+452.296828923) (total time: 18.344179661s): * Trace[817455089]: [18.344179661s] [18.344179661s] END * E0724 22:14:16.808812 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * I0724 22:14:16.808759 1 trace.go:116] Trace[1006933274]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105 (started: 2020-07-24 22:13:58.463556296 +0000 UTC m=+452.295856060) (total time: 18.345181225s): * Trace[1006933274]: [18.345181225s] [18.345181225s] END * E0724 22:14:16.808821 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:20.876613 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:20.876612 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:20.876670 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:24.936658 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:24.936658 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:24.936659 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:29.000632 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Endpoints: Get https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:29.000641 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Service: Get https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0: dial tcp 10.96.0.1:443: connect: no route to host * E0724 22:14:29.000632 1 reflector.go:153] pkg/mod/k8s.io/client-go@v0.17.2/tools/cache/reflector.go:105: Failed to list *v1.Namespace: Get https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0: 
dial tcp 10.96.0.1:443: connect: no route to host
* [INFO] plugin/ready: Still waiting on: "kubernetes"
* [INFO] plugin/ready: Still waiting on: "kubernetes"
* [INFO] plugin/ready: Still waiting on: "kubernetes"
* [INFO] plugin/ready: Still waiting on: "kubernetes"
*
* ==> describe nodes <==
* Name:               bridge-20200724220503-14997
* Roles:              master
* Labels:             beta.kubernetes.io/arch=amd64
*                     beta.kubernetes.io/os=linux
*                     kubernetes.io/arch=amd64
*                     kubernetes.io/hostname=bridge-20200724220503-14997
*                     kubernetes.io/os=linux
*                     minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
*                     minikube.k8s.io/name=bridge-20200724220503-14997
*                     minikube.k8s.io/updated_at=2020_07_24T22_06_04_0700
*                     minikube.k8s.io/version=v1.12.1
*                     node-role.kubernetes.io/master=
* Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
*                     node.alpha.kubernetes.io/ttl: 0
*                     volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp:  Fri, 24 Jul 2020 22:06:00 +0000
* Taints:
* Unschedulable:      false
* Lease:
*   HolderIdentity:  bridge-20200724220503-14997
*   AcquireTime:
*   RenewTime:       Fri, 24 Jul 2020 22:14:25 +0000
* Conditions:
*   Type            Status  LastHeartbeatTime                LastTransitionTime               Reason                      Message
*   ----            ------  -----------------                ------------------               ------                      -------
*   MemoryPressure  False   Fri, 24 Jul 2020 22:12:06 +0000  Fri, 24 Jul 2020 22:05:55 +0000  KubeletHasSufficientMemory  kubelet has sufficient memory available
*   DiskPressure    False   Fri, 24 Jul 2020 22:12:06 +0000  Fri, 24 Jul 2020 22:05:55 +0000  KubeletHasNoDiskPressure    kubelet has no disk pressure
*   PIDPressure     False   Fri, 24 Jul 2020 22:12:06 +0000  Fri, 24 Jul 2020 22:05:55 +0000  KubeletHasSufficientPID     kubelet has sufficient PID available
*   Ready           True    Fri, 24 Jul 2020 22:12:06 +0000  Fri, 24 Jul 2020 22:06:15 +0000  KubeletReady                kubelet is posting ready status
* Addresses:
*   InternalIP:  172.17.0.12
*   Hostname:    bridge-20200724220503-14997
* Capacity:
*   cpu:                16
*   ephemeral-storage:  128884272Ki
*   hugepages-1Gi:      0
*   hugepages-2Mi:      0
*   memory:             65817044Ki
*   pods:               110
* Allocatable:
*   cpu:                16
*   ephemeral-storage:  128884272Ki
*   hugepages-1Gi:      0
*   hugepages-2Mi:      0
*   memory:             65817044Ki
*   pods:               110
* System Info:
*   Machine ID:                 98937d9cced445e3a7be52b8555c5276
*   System UUID:                6d484bba-d9f2-4b32-bc6e-79a8c6ad558c
*   Boot ID:                    65219ec9-ab55-4151-85fa-6cbcd6144529
*   Kernel Version:             5.4.0-1022-azure
*   OS Image:                   Ubuntu 19.10
*   Operating System:           linux
*   Architecture:               amd64
*   Container Runtime Version:  docker://19.3.2
*   Kubelet Version:            v1.18.3
*   Kube-Proxy Version:         v1.18.3
* PodCIDR:   10.244.0.0/24
* PodCIDRs:  10.244.0.0/24
* Non-terminated Pods:  (8 in total)
*   Namespace    Name                                                 CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
*   ---------    ----                                                 ------------  ----------  ---------------  -------------  ---
*   default      netcat-7987c4c66b-kczvb                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         7m53s
*   kube-system  coredns-66bff467f8-b548n                             100m (0%)     0 (0%)      70Mi (0%)        170Mi (0%)     8m10s
*   kube-system  etcd-bridge-20200724220503-14997                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m25s
*   kube-system  kube-apiserver-bridge-20200724220503-14997           250m (1%)     0 (0%)      0 (0%)           0 (0%)         8m25s
*   kube-system  kube-controller-manager-bridge-20200724220503-14997  200m (1%)     0 (0%)      0 (0%)           0 (0%)         8m25s
*   kube-system  kube-proxy-s6gsk                                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m10s
*   kube-system  kube-scheduler-bridge-20200724220503-14997           100m (0%)     0 (0%)      0 (0%)           0 (0%)         8m25s
*   kube-system  storage-provisioner                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m7s
* Allocated resources:
*   (Total limits may be over 100 percent, i.e., overcommitted.)
*   Resource           Requests   Limits
*   --------           --------   ------
*   cpu                650m (4%)  0 (0%)
*   memory             70Mi (0%)  170Mi (0%)
*   ephemeral-storage  0 (0%)     0 (0%)
*   hugepages-1Gi      0 (0%)     0 (0%)
*   hugepages-2Mi      0 (0%)     0 (0%)
* Events:
*   Type     Reason                   Age                    From                                     Message
*   ----     ------                   ----                   ----                                     -------
*   Normal   Starting                 8m40s                  kubelet, bridge-20200724220503-14997     Starting kubelet.
*   Warning  SystemOOM                8m40s                  kubelet, bridge-20200724220503-14997     System OOM encountered, victim process: registry-server, pid: 26041
*   Warning  SystemOOM                8m40s                  kubelet, bridge-20200724220503-14997     System OOM encountered, victim process: registry-server, pid: 28567
*   Normal   NodeHasSufficientMemory  8m39s (x5 over 8m40s)  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeHasSufficientMemory
*   Normal   NodeHasNoDiskPressure    8m39s (x5 over 8m40s)  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeHasNoDiskPressure
*   Normal   NodeHasSufficientPID     8m39s (x4 over 8m40s)  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeHasSufficientPID
*   Normal   NodeAllocatableEnforced  8m39s                  kubelet, bridge-20200724220503-14997     Updated Node Allocatable limit across pods
*   Warning  SystemOOM                8m26s                  kubelet, bridge-20200724220503-14997     System OOM encountered, victim process: registry-server, pid: 28567
*   Warning  SystemOOM                8m26s                  kubelet, bridge-20200724220503-14997     System OOM encountered, victim process: registry-server, pid: 26041
*   Normal   Starting                 8m26s                  kubelet, bridge-20200724220503-14997     Starting kubelet.
*   Normal   NodeHasSufficientMemory  8m26s                  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeHasSufficientMemory
*   Normal   NodeHasNoDiskPressure    8m26s                  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeHasNoDiskPressure
*   Normal   NodeHasSufficientPID     8m26s                  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeHasSufficientPID
*   Normal   NodeNotReady             8m26s                  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeNotReady
*   Normal   NodeAllocatableEnforced  8m26s                  kubelet, bridge-20200724220503-14997     Updated Node Allocatable limit across pods
*   Normal   NodeReady                8m16s                  kubelet, bridge-20200724220503-14997     Node bridge-20200724220503-14997 status is now: NodeReady
*   Warning  readOnlySysFS            8m5s                   kube-proxy, bridge-20200724220503-14997  CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
*   Normal   Starting                 8m5s                   kube-proxy, bridge-20200724220503-14997  Starting kube-proxy.
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [13e8d3b08e18] <== * 2020-07-24 22:12:24.690721 W | etcdserver: read-only range request "key:\"/registry/namespaces\" range_end:\"/registry/namespacet\" count_only:true " with result "range_response_count:0 size:7" took too long (10.8265056s) to execute * 2020-07-24 22:12:24.691001 W | etcdserver: read-only range request "key:\"/registry/clusterrolebindings\" range_end:\"/registry/clusterrolebindingt\" count_only:true " with result "range_response_count:0 size:7" took too long (10.409307452s) to execute * 2020-07-24 22:12:24.691102 W | etcdserver: read-only range request "key:\"/registry/resourcequotas\" range_end:\"/registry/resourcequotat\" count_only:true " with result "range_response_count:0 size:5" took too long (7.778712466s) to execute * 2020-07-24 22:12:24.731646 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (1.999987009s) to execute * 2020-07-24 22:12:24.731944 W | etcdserver: read-only range request "key:\"/registry/networkpolicies\" range_end:\"/registry/networkpoliciet\" count_only:true " with result "range_response_count:0 size:5" took too long (3.60060794s) to execute * 2020-07-24 22:12:24.732008 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.375288519s) to execute * 2020-07-24 22:12:24.732933 W | etcdserver: read-only range request "key:\"/registry/leases\" range_end:\"/registry/leaset\" count_only:true " with result "range_response_count:0 size:7" took too long (2.180925749s) to execute * 2020-07-24 22:12:24.985068 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result 
"range_response_count:1 size:610" took too long (249.158368ms) to execute * 2020-07-24 22:12:24.988189 W | etcdserver: read-only range request "key:\"/registry/cronjobs/\" range_end:\"/registry/cronjobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (250.417755ms) to execute * 2020-07-24 22:12:24.994726 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/bridge-20200724220503-14997\" " with result "range_response_count:1 size:676" took too long (292.33036ms) to execute * 2020-07-24 22:12:50.599258 W | wal: sync duration of 2.801991767s, expected less than 1s * 2020-07-24 22:12:55.902570 W | etcdserver: read-only range request "key:\"/registry/endpointslices\" range_end:\"/registry/endpointslicet\" count_only:true " with result "range_response_count:0 size:7" took too long (7.785643048s) to execute * 2020-07-24 22:12:55.904781 W | wal: sync duration of 5.305373699s, expected less than 1s * 2020-07-24 22:12:55.907374 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (5.951395829s) to execute * 2020-07-24 22:12:56.143898 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (1.136988651s) to execute * 2020-07-24 22:12:56.143948 W | etcdserver: read-only range request "key:\"/registry/certificatesigningrequests\" range_end:\"/registry/certificatesigningrequestt\" count_only:true " with result "range_response_count:0 size:7" took too long (2.967921243s) to execute * 2020-07-24 22:12:56.143962 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.938657214s) to execute * 2020-07-24 22:12:56.144022 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (3.411904457s) to execute * 2020-07-24 22:12:56.144461 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (4.355808301s) to execute * 2020-07-24 22:13:06.705293 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (557.089178ms) to execute * 2020-07-24 22:13:06.705319 W | etcdserver: read-only range request "key:\"/registry/ingress\" range_end:\"/registry/ingrest\" count_only:true " with result "range_response_count:0 size:5" took too long (170.54155ms) to execute * 2020-07-24 22:13:15.433677 W | wal: sync duration of 4.277494363s, expected less than 1s * 2020-07-24 22:13:15.471539 W | etcdserver: read-only range request "key:\"/registry/cronjobs\" range_end:\"/registry/cronjobt\" count_only:true " with result "range_response_count:0 size:5" took too long (1.282443166s) to execute * 2020-07-24 22:13:15.471598 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (260.956303ms) to execute * 2020-07-24 22:13:15.471714 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long 
(2.739046114s) to execute * * ==> kernel <== * 22:14:31 up 41 min, 0 users, load average: 12.26, 12.82, 8.96 * Linux bridge-20200724220503-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [d2b209fecb82] <== * Trace[1535239343]: [1.36801281s] [1.367920004s] END * I0724 22:12:24.691795 1 trace.go:116] Trace[474130030]: "Create" url:/api/v1/namespaces/kube-system/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.12 (started: 2020-07-24 22:12:18.995040861 +0000 UTC m=+384.209246876) (total time: 5.696723185s): * Trace[474130030]: [5.696682982s] [5.696614277s] Object stored in database * I0724 22:12:24.734606 1 trace.go:116] Trace[1469368167]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:12:22.355903759 +0000 UTC m=+387.570109774) (total time: 2.378662353s): * Trace[1469368167]: [2.378662353s] [2.378662353s] END * I0724 22:12:24.734696 1 trace.go:116] Trace[357653783]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:12:22.73121167 +0000 UTC m=+387.945417685) (total time: 2.003449149s): * Trace[357653783]: [2.003410446s] [2.003399645s] About to write a response * I0724 22:12:24.734702 1 trace.go:116] Trace[1536451314]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.12 (started: 2020-07-24 22:12:22.355881858 +0000 UTC m=+387.570087873) (total time: 2.378794562s): * Trace[1536451314]: [2.378748659s] [2.378732258s] Listing from storage done * I0724 22:12:55.905618 1 trace.go:116] Trace[11515108]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:12:55.006712675 +0000 UTC m=+420.220918790) (total time: 898.869348ms): * Trace[11515108]: [898.845046ms] [898.450324ms] Transaction committed * I0724 22:12:55.905735 1 trace.go:116] Trace[370135447]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/bridge-20200724220503-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.12 (started: 2020-07-24 22:12:55.006585867 +0000 UTC m=+420.220791982) (total time: 899.122763ms): * Trace[370135447]: [899.06696ms] [898.969754ms] Object stored in database * I0724 22:12:56.144608 1 trace.go:116] Trace[36611466]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:12:55.00645126 +0000 UTC m=+420.220657375) (total time: 1.138106614s): * Trace[36611466]: [1.138106614s] [1.138106614s] END * I0724 22:12:56.144730 1 trace.go:116] Trace[1506368562]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.12 (started: 2020-07-24 22:12:55.006411357 +0000 UTC m=+420.220617372) (total time: 1.138290925s): * Trace[1506368562]: [1.138223222s] [1.138207321s] Listing from storage done * I0724 22:12:56.144784 1 trace.go:116] Trace[1651370096]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:12:52.731633956 +0000 UTC m=+417.945839971) (total time: 3.413105428s): * Trace[1651370096]: [3.413053826s] [3.413045825s] About to write a response * I0724 22:13:06.705895 1 trace.go:116] Trace[1725910256]: "List etcd3" 
key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:13:06.147786017 +0000 UTC m=+431.361992032) (total time: 558.067744ms): * Trace[1725910256]: [558.067744ms] [558.067744ms] END * I0724 22:13:06.706006 1 trace.go:116] Trace[667688471]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.12 (started: 2020-07-24 22:13:06.147761615 +0000 UTC m=+431.361967730) (total time: 558.217454ms): * Trace[667688471]: [558.15575ms] [558.137048ms] Listing from storage done * I0724 22:13:15.472210 1 trace.go:116] Trace[1577576060]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:13:12.732019381 +0000 UTC m=+437.946225396) (total time: 2.740142185s): * Trace[1577576060]: [2.740085481s] [2.740059479s] About to write a response * * ==> kube-controller-manager [d2b3974bb07d] <== * I0724 22:06:21.271625 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:06:21.271642 1 disruption.go:339] Sending events to api server. * I0724 22:06:21.272042 1 shared_informer.go:230] Caches are synced for ReplicaSet * I0724 22:06:21.272172 1 shared_informer.go:230] Caches are synced for taint * I0724 22:06:21.272231 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * W0724 22:06:21.272313 1 node_lifecycle_controller.go:1048] Missing timestamp for Node bridge-20200724220503-14997. Assuming now as a timestamp. * I0724 22:06:21.272379 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:06:21.272409 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:06:21.272452 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"bridge-20200724220503-14997", UID:"52700137-9d83-4afe-9c5d-b165fc28944a", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node bridge-20200724220503-14997 event: Registered Node bridge-20200724220503-14997 in Controller * I0724 22:06:21.273063 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:06:21.287391 1 request.go:621] Throttling request took 1.048832233s, request: GET:https://control-plane.minikube.internal:8443/apis/apiextensions.k8s.io/v1beta1?timeout=32s * I0724 22:06:21.301968 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"921d33ab-023f-4993-87a6-73cfb3e9915f", APIVersion:"apps/v1", ResourceVersion:"341", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-9vwfn * I0724 22:06:21.307513 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"9be4f76d-c640-459c-91be-520996ce7662", APIVersion:"apps/v1", ResourceVersion:"227", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-s6gsk * I0724 22:06:21.313255 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:06:21.313278 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * I0724 22:06:21.322370 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:06:21.336294 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"921d33ab-023f-4993-87a6-73cfb3e9915f", APIVersion:"apps/v1", ResourceVersion:"341", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-b548n * I0724 22:06:21.340078 1 shared_informer.go:230] Caches are synced for resource quota * E0724 22:06:21.455069 1 daemon_controller.go:292] kube-system/kube-proxy failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy", GenerateName:"", Namespace:"kube-system", SelfLink:"/apis/apps/v1/namespaces/kube-system/daemonsets/kube-proxy", UID:"9be4f76d-c640-459c-91be-520996ce7662", ResourceVersion:"227", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63731225164, loc:(*time.Location)(0x6d09200)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"1"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kubeadm", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0xc0017cc8e0), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0017cc900)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc0017cc920), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-proxy", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(0xc00061f580), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}, v1.Volume{Name:"xtables-lock", 
VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc0017cc940), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}, v1.Volume{Name:"lib-modules", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc0017cc960), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kube-proxy", Image:"k8s.gcr.io/kube-proxy:v1.18.3", Command:[]string{"/usr/local/bin/kube-proxy", "--config=/var/lib/kube-proxy/config.conf", "--hostname-override=$(NODE_NAME)"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"NODE_NAME", Value:"", ValueFrom:(*v1.EnvVarSource)(0xc0017cc9a0)}}, Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-proxy", ReadOnly:false, MountPath:"/var/lib/kube-proxy", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"xtables-lock", ReadOnly:false, MountPath:"/run/xtables.lock", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"lib-modules", ReadOnly:true, 
MountPath:"/lib/modules", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(0xc001d5eb40), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001d59668), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string{"kubernetes.io/os":"linux"}, ServiceAccountName:"kube-proxy", DeprecatedServiceAccount:"kube-proxy", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000a7c700), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"system-node-critical", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc001ca0148)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc001d596b8)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "kube-proxy": the object has been modified; please apply your changes to the latest version and try again * I0724 22:06:21.575937 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"5169a0b0-94e2-4003-8b3f-9a18d03c786b", APIVersion:"apps/v1", ResourceVersion:"370", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1 * I0724 22:06:21.636822 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"921d33ab-023f-4993-87a6-73cfb3e9915f", APIVersion:"apps/v1", ResourceVersion:"372", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-9vwfn * I0724 22:06:21.888589 1 shared_informer.go:223] Waiting for caches to sync for resource quota * I0724 22:06:21.888625 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:06:38.601862 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"default", Name:"netcat", UID:"6b7c4100-0a67-46d5-a2ab-fe11ad2c04b0", APIVersion:"apps/v1", ResourceVersion:"425", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set netcat-7987c4c66b to 1 * I0724 22:06:38.611777 1 
event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"default", Name:"netcat-7987c4c66b", UID:"9615281b-8d61-44ee-b945-9d9c2153edc2", APIVersion:"apps/v1", ResourceVersion:"426", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: netcat-7987c4c66b-kczvb * * ==> kube-proxy [7e16b0926843] <== * W0724 22:06:26.099333 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:06:26.110077 1 node.go:136] Successfully retrieved node IP: 172.17.0.12 * I0724 22:06:26.110131 1 server_others.go:186] Using iptables Proxier. * I0724 22:06:26.110525 1 server.go:583] Version: v1.18.3 * I0724 22:06:26.111270 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:06:26.115907 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:06:26.116319 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:06:26.135977 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:06:26.136188 1 config.go:315] Starting service config controller * I0724 22:06:26.136198 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:06:26.136229 1 config.go:133] Starting endpoints config controller * I0724 22:06:26.136242 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:06:26.236351 1 shared_informer.go:230] Caches are synced for endpoints config * I0724 22:06:26.236474 1 shared_informer.go:230] Caches are synced for service config * * ==> kube-scheduler [000497d97038] <== * W0724 22:06:00.956827 1 authentication.go:299] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false * I0724 22:06:01.046833 1 registry.go:150] Registering EvenPodsSpread predicate and priority function * I0724 22:06:01.046927 1 registry.go:150] Registering EvenPodsSpread predicate and priority function * W0724 22:06:01.049009 1 authorization.go:47] Authorization is disabled * W0724 22:06:01.049032 1 authentication.go:40] Authentication is disabled * I0724 22:06:01.049049 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:06:01.051679 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file * I0724 22:06:01.051713 1 shared_informer.go:223] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * I0724 22:06:01.052159 1 secure_serving.go:178] Serving securely on 127.0.0.1:10259 * I0724 22:06:01.052206 1 tlsconfig.go:240] Starting DynamicServingCertificateController * E0724 22:06:01.055196 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:06:01.057242 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:06:01.057385 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: 
persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:06:01.057496 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:06:01.057609 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:06:01.057706 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:06:01.059818 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:06:01.059941 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:06:01.062454 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:06:01.867753 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:06:01.896134 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:06:01.900136 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:06:02.051108 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:06:02.275608 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * I0724 22:06:04.951969 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:05:39 UTC, end at Fri 2020-07-24 22:14:31 UTC. 
-- * Jul 24 22:12:15 bridge-20200724220503-14997 kubelet[2377]: E0724 22:12:15.150551 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:12:23 bridge-20200724220503-14997 kubelet[2377]: E0724 22:12:23.322773 2377 controller.go:178] failed to update node lease, error: etcdserver: request timed out * Jul 24 22:12:24 bridge-20200724220503-14997 kubelet[2377]: E0724 22:12:24.691880 2377 controller.go:178] failed to update node lease, error: Operation cannot be fulfilled on leases.coordination.k8s.io "bridge-20200724220503-14997": the object has been modified; please apply your changes to the latest version and try again * Jul 24 22:12:27 bridge-20200724220503-14997 kubelet[2377]: I0724 22:12:27.150251 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: af05fa57ba6949e6224ec51d9ab9f0e538d11318e6b9dfb54da997c7174d727e * Jul 24 22:12:27 bridge-20200724220503-14997 kubelet[2377]: E0724 22:12:27.150513 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:12:41 bridge-20200724220503-14997 kubelet[2377]: I0724 22:12:41.150157 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: af05fa57ba6949e6224ec51d9ab9f0e538d11318e6b9dfb54da997c7174d727e * Jul 24 22:12:41 bridge-20200724220503-14997 kubelet[2377]: E0724 22:12:41.150416 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:12:52 bridge-20200724220503-14997 kubelet[2377]: I0724 22:12:52.150213 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: af05fa57ba6949e6224ec51d9ab9f0e538d11318e6b9dfb54da997c7174d727e * Jul 24 22:12:58 bridge-20200724220503-14997 kubelet[2377]: I0724 22:12:58.303095 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: af05fa57ba6949e6224ec51d9ab9f0e538d11318e6b9dfb54da997c7174d727e * Jul 24 22:12:58 bridge-20200724220503-14997 kubelet[2377]: I0724 22:12:58.303521 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d * Jul 24 22:12:58 bridge-20200724220503-14997 kubelet[2377]: E0724 22:12:58.303740 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:13:11 bridge-20200724220503-14997 kubelet[2377]: 
I0724 22:13:11.150664 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d * Jul 24 22:13:11 bridge-20200724220503-14997 kubelet[2377]: E0724 22:13:11.151724 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:13:22 bridge-20200724220503-14997 kubelet[2377]: I0724 22:13:22.150249 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d * Jul 24 22:13:22 bridge-20200724220503-14997 kubelet[2377]: E0724 22:13:22.150465 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:13:34 bridge-20200724220503-14997 kubelet[2377]: I0724 22:13:34.150083 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d * Jul 24 22:13:34 bridge-20200724220503-14997 kubelet[2377]: E0724 22:13:34.150335 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:13:47 bridge-20200724220503-14997 kubelet[2377]: I0724 22:13:47.150223 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d * Jul 24 22:13:47 bridge-20200724220503-14997 kubelet[2377]: E0724 22:13:47.150454 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:14:02 bridge-20200724220503-14997 kubelet[2377]: I0724 22:14:02.150107 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d * Jul 24 22:14:02 bridge-20200724220503-14997 kubelet[2377]: E0724 22:14:02.150408 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)" * Jul 24 22:14:17 bridge-20200724220503-14997 kubelet[2377]: I0724 22:14:17.150335 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 
d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d
* Jul 24 22:14:17 bridge-20200724220503-14997 kubelet[2377]: E0724 22:14:17.150653 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"
* Jul 24 22:14:29 bridge-20200724220503-14997 kubelet[2377]: I0724 22:14:29.150231 2377 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d3027c300055e1db4a3e91bea5629a19fa10d3235d9c5ff4a8a2c6df1c7b871d
* Jul 24 22:14:29 bridge-20200724220503-14997 kubelet[2377]: E0724 22:14:29.150552 2377 pod_workers.go:191] Error syncing pod da573614-228d-4e6c-abfe-a6872f2e5bb2 ("storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(da573614-228d-4e6c-abfe-a6872f2e5bb2)"
*
* ==> storage-provisioner [d3027c300055] <==
* F0724 22:12:57.389518 1 main.go:37] Error getting server version: the server has asked for the client to provide credentials
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p bridge-20200724220503-14997 -n bridge-20200724220503-14997
helpers_test.go:254: (dbg) Run: kubectl --context bridge-20200724220503-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods:
helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/bridge]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context bridge-20200724220503-14997 describe pod
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context bridge-20200724220503-14997 describe pod : exit status 1 (68.703535ms)
** stderr **
error: resource name may not be empty
** /stderr **
helpers_test.go:267: kubectl --context bridge-20200724220503-14997 describe pod : exit status 1
helpers_test.go:170: Cleaning up "bridge-20200724220503-14997" profile ...
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p bridge-20200724220503-14997
=== CONT TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:190: (dbg) Done: ./minikube-linux-amd64 start -p newest-cni-20200724221234-14997 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubelet.network-plugin=cni --extra-config=kubeadm.pod-network-cidr=192.168.111.111/16 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.4-rc.0: (42.414999669s)
start_stop_delete_test.go:196: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997
start_stop_delete_test.go:196: (dbg) Done: ./minikube-linux-amd64 status --format={{.Host}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997: (1.82518555s)
=== RUN TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:207: WARNING: cni mode requires additional setup before pods can schedule :(
=== RUN TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:218: WARNING: cni mode requires additional setup before pods can schedule :(
=== RUN TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:227: (dbg) Run: ./minikube-linux-amd64 ssh -p newest-cni-20200724221234-14997 "sudo crictl images -o json"
=== RUN TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 pause -p newest-cni-20200724221234-14997 --alsologtostderr -v=1
=== CONT TestNetworkPlugins/group/bridge
helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p bridge-20200724220503-14997: (5.090847737s)
=== CONT TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997
start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997: exit status 2 (360.402037ms)
-- stdout --
Paused
-- /stdout --
start_stop_delete_test.go:233: status error: exit status 2 (may be ok)
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997
start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997: exit status 2 (359.128954ms)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:233: status error: exit status 2 (may be ok)
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 unpause -p newest-cni-20200724221234-14997 --alsologtostderr -v=1
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-20200724221234-14997 -n newest-cni-20200724221234-14997
=== CONT TestStartStop/group/newest-cni/serial
start_stop_delete_test.go:126: (dbg) Run: ./minikube-linux-amd64 delete -p newest-cni-20200724221234-14997
start_stop_delete_test.go:126: (dbg) Done: ./minikube-linux-amd64 delete -p newest-cni-20200724221234-14997: (4.612515955s)
start_stop_delete_test.go:131: (dbg) Run: kubectl config get-contexts newest-cni-20200724221234-14997
start_stop_delete_test.go:131: (dbg) Non-zero exit: kubectl config get-contexts newest-cni-20200724221234-14997: exit status 1 (57.488119ms)
-- stdout --
CURRENT   NAME   CLUSTER   AUTHINFO   NAMESPACE
-- /stdout --
** stderr **
error: context newest-cni-20200724221234-14997 not found
** /stderr **
start_stop_delete_test.go:133: config context error: exit status 1 (may be ok)
=== CONT TestStartStop/group/newest-cni
helpers_test.go:170: Cleaning up "newest-cni-20200724221234-14997" profile ...
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p newest-cni-20200724221234-14997
=== CONT TestStartStop/group/crio/serial/FirstStart
start_stop_delete_test.go:149: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p crio-20200724220901-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=crio --disable-driver-mounts --extra-config=kubeadm.ignore-preflight-errors=SystemVerification --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.15.7: exit status 70 (8m20.582531207s)
-- stdout --
* [crio-20200724220901-14997] minikube v1.12.1 on Ubuntu 20.04
  - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
  - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome
* Using the docker driver based on user configuration
* Starting control plane node crio-20200724220901-14997 in cluster crio-20200724220901-14997
* Pulling base image ...
* Creating docker container (CPUs=2, Memory=2200MB) ...
* Preparing Kubernetes v1.15.7 on CRI-O 1.17.3 ...
  - kubeadm.ignore-preflight-errors=SystemVerification
* Configuring CNI (Container Networking Interface) ...
* Verifying Kubernetes components...
* Enabled addons: default-storageclass, storage-provisioner
-- /stdout --
** stderr **
I0724 22:09:01.994173 268235 out.go:188] Setting JSON to false
I0724 22:09:01.998249 268235 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":2180,"bootTime":1595626361,"procs":1035,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"}
I0724 22:09:01.998987 268235 start.go:111] virtualization: kvm host
I0724 22:09:02.056476 268235 notify.go:125] Checking for updates...
I0724 22:09:02.084713 268235 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:09:02.153367 268235 docker.go:87] docker version: linux-19.03.8 I0724 22:09:02.190333 268235 start.go:217] selected driver: docker I0724 22:09:02.190343 268235 start.go:623] validating driver "docker" against I0724 22:09:02.190361 268235 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 22:09:02.190466 268235 cli_runner.go:109] Run: docker system info --format "{{json .}}" I0724 22:09:02.263583 268235 start_flags.go:223] no existing cluster config was found, will generate one from the flags I0724 22:09:02.263821 268235 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 22:09:02.263847 268235 cni.go:74] Creating CNI manager for "" I0724 22:09:02.263852 268235 cni.go:105] "docker" driver + crio runtime found, recommending kindnet I0724 22:09:02.263862 268235 start_flags.go:340] Found "CNI" CNI - setting NetworkPlugin=cni I0724 22:09:02.263875 268235 start_flags.go:345] config: {Name:crio-20200724220901-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:09:02.362873 268235 cache.go:117] Beginning downloading kic base image for docker with crio I0724 22:09:02.397137 268235 preload.go:97] Checking if preload exists for k8s version v1.15.7 and runtime crio I0724 22:09:02.397294 268235 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 22:09:02.397310 268235 image.go:140] Writing local/kicbase:-snapshot to local daemon W0724 22:09:02.438205 268235 preload.go:118] https://storage.googleapis.com/minikube-preloaded-volume-tarballs/preloaded-images-k8s-v4-v1.15.7-cri-o-overlay-amd64.tar.lz4 status code: 404 I0724 22:09:02.438631 268235 cache.go:92] acquiring lock: {Name:mkbf11915380a29453ebb2928e02583f08e9fbef Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438656 268235 cache.go:92] acquiring lock: {Name:mk32ca5b3e79b7307f47e9423a681719da980baf Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438672 268235 cache.go:92] acquiring lock: {Name:mk23b568bde816fbc3b7ffe51df75f69fbef3bc3 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438686 268235 profile.go:150] Saving config to 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/config.json ... I0724 22:09:02.438632 268235 cache.go:92] acquiring lock: {Name:mk7a059f3306f99b39e3faab02f4e85d5d81b09a Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438709 268235 cache.go:92] acquiring lock: {Name:mk8dbb77fe3f422f0c49d4018c75dc085d55f33f Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438707 268235 cache.go:92] acquiring lock: {Name:mk485a5c0ed60ac206a50c6264fd0a21b101b196 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438788 268235 cache.go:92] acquiring lock: {Name:mk255e673dbb60dbee8eadf0518a0d0ffba2c00a Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438748 268235 cache.go:92] acquiring lock: {Name:mka9e31604aefd879ba790960409de49ba1db0d2 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438832 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/config.json: {Name:mkfbe57e3c70f62ff1dc9b7d49b4801fa3c058cd Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:09:02.438813 268235 cache.go:92] acquiring lock: {Name:mk0e47a450df0c06d577780e58819b1c04eb4bab Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438864 268235 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 exists I0724 22:09:02.438868 268235 image.go:168] retrieving image: k8s.gcr.io/etcd:3.3.10 I0724 22:09:02.438890 268235 image.go:168] retrieving image: k8s.gcr.io/kube-proxy:v1.15.7 I0724 22:09:02.438906 268235 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 exists I0724 22:09:02.438904 268235 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 exists I0724 22:09:02.438902 268235 cache.go:81] cache image "kubernetesui/dashboard:v2.0.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1" took 235.316µs I0724 22:09:02.438920 268235 cache.go:66] save to tar file kubernetesui/dashboard:v2.0.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 succeeded I0724 22:09:02.438844 268235 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 exists I0724 22:09:02.438922 268235 image.go:168] retrieving image: k8s.gcr.io/coredns:1.3.1 I0724 22:09:02.438924 268235 cache.go:81] cache image "k8s.gcr.io/pause:3.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1" took 201.514µs I0724 22:09:02.438924 268235 image.go:168] retrieving image: k8s.gcr.io/kube-controller-manager:v1.15.7 I0724 22:09:02.438927 268235 cache.go:81] cache image "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1" took 260.418µs I0724 22:09:02.438937 268235 cache.go:66] save to tar file k8s.gcr.io/pause:3.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 succeeded I0724 22:09:02.438813 268235 cache.go:92] acquiring lock: 
{Name:mkb173bfd0adb5c32495c7e2d9cb127d135dcb56 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:09:02.438945 268235 cache.go:66] save to tar file gcr.io/k8s-minikube/storage-provisioner:v1.8.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 succeeded I0724 22:09:02.438967 268235 image.go:168] retrieving image: k8s.gcr.io/kube-apiserver:v1.15.7 I0724 22:09:02.438957 268235 cache.go:81] cache image "kubernetesui/metrics-scraper:v1.0.4" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4" took 332.424µs I0724 22:09:02.438980 268235 cache.go:66] save to tar file kubernetesui/metrics-scraper:v1.0.4 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 succeeded I0724 22:09:02.439010 268235 image.go:168] retrieving image: k8s.gcr.io/kube-scheduler:v1.15.7 I0724 22:09:02.439776 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-scheduler:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:02.439780 268235 image.go:176] daemon lookup for k8s.gcr.io/etcd:3.3.10: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:02.439992 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-apiserver:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:02.440028 268235 image.go:176] daemon lookup for k8s.gcr.io/coredns:1.3.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:02.440118 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-proxy:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:02.440228 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-controller-manager:v1.15.7: Error response from daemon: client version 1.41 is too new. 
Maximum supported API version is 1.40 I0724 22:09:02.583445 268235 cache.go:134] opening: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 I0724 22:09:02.617815 268235 cache.go:134] opening: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 I0724 22:09:02.618591 268235 cache.go:134] opening: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 I0724 22:09:02.619138 268235 cache.go:134] opening: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 I0724 22:09:02.626663 268235 cache.go:134] opening: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 I0724 22:09:02.637186 268235 cache.go:134] opening: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 I0724 22:09:02.811328 268235 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found I0724 22:09:02.811382 268235 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:09:02.811388 268235 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:09:03.121443 268235 cache.go:129] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 exists I0724 22:09:03.121671 268235 cache.go:81] cache image "k8s.gcr.io/coredns:1.3.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1" took 682.892527ms I0724 22:09:03.121706 268235 cache.go:66] save to tar file k8s.gcr.io/coredns:1.3.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 succeeded I0724 22:09:03.760260 268235 cache.go:129] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 exists I0724 22:09:03.760305 268235 cache.go:81] cache image "k8s.gcr.io/kube-scheduler:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7" took 1.321521759s I0724 22:09:03.760346 268235 cache.go:66] save to tar file k8s.gcr.io/kube-scheduler:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 succeeded I0724 22:09:03.793769 268235 cache.go:129] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 exists I0724 22:09:03.793818 268235 cache.go:81] cache image "k8s.gcr.io/kube-proxy:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7" took 1.355149609s I0724 22:09:03.793831 268235 cache.go:66] save to tar file k8s.gcr.io/kube-proxy:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 succeeded I0724 22:09:03.972579 268235 
cache.go:129] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 exists I0724 22:09:03.972635 268235 cache.go:81] cache image "k8s.gcr.io/etcd:3.3.10" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10" took 1.534001509s I0724 22:09:03.972659 268235 cache.go:66] save to tar file k8s.gcr.io/etcd:3.3.10 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 succeeded I0724 22:09:04.003464 268235 cache.go:129] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 exists I0724 22:09:04.003513 268235 cache.go:81] cache image "k8s.gcr.io/kube-controller-manager:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7" took 1.564901568s I0724 22:09:04.003535 268235 cache.go:66] save to tar file k8s.gcr.io/kube-controller-manager:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 succeeded I0724 22:09:04.052039 268235 cache.go:129] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 exists I0724 22:09:04.052080 268235 cache.go:81] cache image "k8s.gcr.io/kube-apiserver:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7" took 1.613297751s I0724 22:09:04.052094 268235 cache.go:66] save to tar file k8s.gcr.io/kube-apiserver:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 succeeded I0724 22:09:04.052103 268235 cache.go:73] Successfully saved all images to host disk. I0724 22:09:08.671089 268235 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 ! 
minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image I0724 22:09:08.671147 268235 cache.go:178] Successfully downloaded all kic artifacts I0724 22:09:08.671178 268235 start.go:241] acquiring machines lock for crio-20200724220901-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 22:09:08.671279 268235 start.go:245] acquired machines lock for "crio-20200724220901-14997" in 80.506µs I0724 22:09:08.671312 268235 start.go:85] Provisioning new machine with config: &{Name:crio-20200724220901-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.15.7 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.15.7 ControlPlane:true Worker:true} I0724 22:09:08.671370 268235 start.go:122] createHost starting for "" (driver="docker") I0724 22:09:08.885579 268235 start.go:158] libmachine.API.Create for "crio-20200724220901-14997" (driver="docker") I0724 22:09:08.885624 268235 client.go:161] LocalClient.Create starting I0724 22:09:08.885659 268235 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 22:09:08.885768 268235 main.go:115] libmachine: Decoding PEM data... I0724 22:09:08.885790 268235 main.go:115] libmachine: Parsing certificate... I0724 22:09:08.885919 268235 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 22:09:08.885944 268235 main.go:115] libmachine: Decoding PEM data... I0724 22:09:08.885959 268235 main.go:115] libmachine: Parsing certificate... 
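
A note on the repeated "client version 1.41 is too new. Maximum supported API version is 1.40" lines above: these daemon lookups are non-fatal. When the local Docker daemon rejects the client's API version, minikube falls back to fetching image metadata from the remote registries, which is why the image cache downloads above still succeed. When poking at the same daemon by hand with a newer docker CLI, the client API version can be pinned explicitly; a minimal check, using the maximum version this daemon reports:

# Pin the docker client to the daemon's maximum supported API version (1.40 in this run)
$ DOCKER_API_VERSION=1.40 docker version
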
I0724 22:09:08.886319 268235 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 22:09:08.948737 268235 cli_runner.go:109] Run: docker volume create crio-20200724220901-14997 --label name.minikube.sigs.k8s.io=crio-20200724220901-14997 --label created_by.minikube.sigs.k8s.io=true I0724 22:09:09.015050 268235 oci.go:101] Successfully created a docker volume crio-20200724220901-14997 I0724 22:09:09.016032 268235 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v crio-20200724220901-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib I0724 22:09:11.057421 268235 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/test -v crio-20200724220901-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib: (2.041027134s) I0724 22:09:11.057449 268235 oci.go:105] Successfully prepared a docker volume crio-20200724220901-14997 W0724 22:09:11.057483 268235 oci.go:165] Your kernel does not support swap limit capabilities or the cgroup is not mounted. I0724 22:09:11.057508 268235 preload.go:97] Checking if preload exists for k8s version v1.15.7 and runtime crio I0724 22:09:11.057562 268235 cli_runner.go:109] Run: docker info --format "'{{json .SecurityOptions}}'" W0724 22:09:11.094705 268235 preload.go:118] https://storage.googleapis.com/minikube-preloaded-volume-tarballs/preloaded-images-k8s-v4-v1.15.7-cri-o-overlay-amd64.tar.lz4 status code: 404 I0724 22:09:11.121782 268235 cli_runner.go:109] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname crio-20200724220901-14997 --name crio-20200724220901-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=crio-20200724220901-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=crio-20200724220901-14997 --volume crio-20200724220901-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=2200mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 I0724 22:09:15.829165 268235 cli_runner.go:151] Completed: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname crio-20200724220901-14997 --name crio-20200724220901-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=crio-20200724220901-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=crio-20200724220901-14997 --volume crio-20200724220901-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=2200mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438: (4.707326049s) I0724 22:09:15.829258 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Running}} I0724 22:09:15.881897 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:09:15.936869 268235 cli_runner.go:109] Run: docker exec crio-20200724220901-14997 stat /var/lib/dpkg/alternatives/iptables I0724 22:09:16.101181 268235 oci.go:222] the created 
container "crio-20200724220901-14997" has a running status. I0724 22:09:16.101209 268235 kic.go:157] Creating ssh key for kic: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa... I0724 22:09:16.339914 268235 kic_runner.go:179] docker (temp): /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) I0724 22:09:16.461105 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:09:16.514576 268235 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys I0724 22:09:16.514600 268235 kic_runner.go:114] Args: [docker exec --privileged crio-20200724220901-14997 chown docker:docker /home/docker/.ssh/authorized_keys] I0724 22:09:16.659946 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:09:16.721944 268235 machine.go:88] provisioning docker machine ... I0724 22:09:16.721982 268235 ubuntu.go:166] provisioning hostname "crio-20200724220901-14997" I0724 22:09:16.722056 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:16.784908 268235 main.go:115] libmachine: Using SSH client type: native I0724 22:09:16.785098 268235 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32888 } I0724 22:09:16.785119 268235 main.go:115] libmachine: About to run SSH command: sudo hostname crio-20200724220901-14997 && echo "crio-20200724220901-14997" | sudo tee /etc/hostname I0724 22:09:16.785710 268235 main.go:115] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:33016->127.0.0.1:32888: read: connection reset by peer I0724 22:09:19.926834 268235 main.go:115] libmachine: SSH cmd err, output: : crio-20200724220901-14997 I0724 22:09:19.926917 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:19.985887 268235 main.go:115] libmachine: Using SSH client type: native I0724 22:09:19.986079 268235 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32888 } I0724 22:09:19.986116 268235 main.go:115] libmachine: About to run SSH command: if ! 
grep -xq '.*\scrio-20200724220901-14997' /etc/hosts; then if grep -xq '127.0.1.1\s.*' /etc/hosts; then sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 crio-20200724220901-14997/g' /etc/hosts; else echo '127.0.1.1 crio-20200724220901-14997' | sudo tee -a /etc/hosts; fi fi I0724 22:09:20.110011 268235 main.go:115] libmachine: SSH cmd err, output: : I0724 22:09:20.110041 268235 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube} I0724 22:09:20.110058 268235 ubuntu.go:174] setting up certificates I0724 22:09:20.110067 268235 provision.go:82] configureAuth start I0724 22:09:20.110120 268235 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio-20200724220901-14997 I0724 22:09:20.170170 268235 provision.go:131] copyHostCerts I0724 22:09:20.170227 268235 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ... I0724 22:09:20.170282 268235 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes) I0724 22:09:20.170358 268235 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ... I0724 22:09:20.170391 268235 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes) I0724 22:09:20.170442 268235 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ... 
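
The docker container inspect template above is how the provisioner discovers which localhost port Docker mapped to the node's sshd (32888 in this run). The same two commands can be run by hand to get a shell on the node, using the key path and "docker" user recorded in the sshutil lines of this log (the port value below is taken from the first command's output):

# Resolve the host port mapped to the container's 22/tcp
$ docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' crio-20200724220901-14997
# SSH in the same way the provisioner does
$ ssh -i /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa docker@127.0.0.1 -p 32888
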
I0724 22:09:20.170466 268235 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes) I0724 22:09:20.170504 268235 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.crio-20200724220901-14997 san=[172.17.0.2 localhost 127.0.0.1] I0724 22:09:20.302768 268235 provision.go:159] copyRemoteCerts I0724 22:09:20.302842 268235 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker I0724 22:09:20.302901 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:20.366662 268235 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32888 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker} I0724 22:09:20.462478 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes) I0724 22:09:20.485433 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1143 bytes) I0724 22:09:20.554642 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes) I0724 22:09:20.578974 268235 provision.go:85] duration metric: configureAuth took 468.891591ms I0724 22:09:20.578996 268235 ubuntu.go:190] setting minikube options for container-runtime I0724 22:09:20.579235 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:20.633552 268235 main.go:115] libmachine: Using SSH client type: native I0724 22:09:20.633727 268235 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32888 } I0724 22:09:20.633747 268235 main.go:115] libmachine: About to run SSH command: sudo mkdir -p /etc/sysconfig && printf %s " CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 ' " | sudo tee /etc/sysconfig/crio.minikube I0724 22:09:20.765976 268235 main.go:115] libmachine: SSH cmd err, output: : CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 ' I0724 22:09:20.765998 268235 machine.go:91] provisioned docker machine in 4.044031086s I0724 22:09:20.766012 268235 client.go:164] LocalClient.Create took 11.880376329s I0724 22:09:20.766025 268235 start.go:163] duration metric: libmachine.API.Create for "crio-20200724220901-14997" took 11.880449234s I0724 22:09:20.766035 268235 start.go:204] post-start starting for "crio-20200724220901-14997" (driver="docker") I0724 22:09:20.766042 268235 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] I0724 22:09:20.766104 268235 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons 
/etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs I0724 22:09:20.766177 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:20.819054 268235 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32888 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker} I0724 22:09:20.911370 268235 ssh_runner.go:148] Run: cat /etc/os-release I0724 22:09:20.915132 268235 main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found I0724 22:09:20.915164 268235 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found I0724 22:09:20.915175 268235 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found I0724 22:09:20.915181 268235 info.go:98] Remote host: Ubuntu 19.10 I0724 22:09:20.915190 268235 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets ... I0724 22:09:20.915242 268235 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ... I0724 22:09:20.915385 268235 filesync.go:141] local asset: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts -> hosts in /etc/test/nested/copy/14997 I0724 22:09:20.915441 268235 ssh_runner.go:148] Run: sudo mkdir -p /etc/test/nested/copy/14997 I0724 22:09:20.924564 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts --> /etc/test/nested/copy/14997/hosts (40 bytes) I0724 22:09:20.947983 268235 start.go:207] post-start completed in 181.936046ms I0724 22:09:20.948838 268235 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio-20200724220901-14997 I0724 22:09:21.006668 268235 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/config.json ... 
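
The filesync.go lines above explain how /etc/test/nested/copy/14997/hosts got into the node: anything staged under the .minikube/files tree on the host is copied into the machine at the same path relative to /. A sketch of staging such a file before minikube start (the directory name and file contents below are made-up examples):

# Files under $MINIKUBE_HOME/.minikube/files/<path> are synced to /<path> in the node
$ mkdir -p "$MINIKUBE_HOME/.minikube/files/etc/example"
$ printf '127.0.0.1 example.test\n' > "$MINIKUBE_HOME/.minikube/files/etc/example/hosts"
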
I0724 22:09:21.006909 268235 start.go:125] duration metric: createHost completed in 12.335530381s I0724 22:09:21.006928 268235 start.go:76] releasing machines lock for "crio-20200724220901-14997", held for 12.335634487s I0724 22:09:21.007009 268235 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio-20200724220901-14997 I0724 22:09:21.065627 268235 ssh_runner.go:148] Run: systemctl --version I0724 22:09:21.065687 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:21.065734 268235 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/ I0724 22:09:21.065822 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:09:21.124354 268235 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32888 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker} I0724 22:09:21.126677 268235 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32888 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker} I0724 22:09:21.274396 268235 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd I0724 22:09:21.288198 268235 ssh_runner.go:148] Run: sudo systemctl stop -f containerd I0724 22:09:21.310201 268235 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd I0724 22:09:21.321706 268235 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service docker I0724 22:09:21.333556 268235 ssh_runner.go:148] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock image-endpoint: unix:///var/run/crio/crio.sock " | sudo tee /etc/crictl.yaml" I0724 22:09:21.349794 268235 ssh_runner.go:148] Run: /bin/bash -c "sudo sed -e 's|^pause_image = .*$|pause_image = "k8s.gcr.io/pause:3.1"|' -i /etc/crio/crio.conf" I0724 22:09:21.361383 268235 ssh_runner.go:148] Run: sudo sysctl net.bridge.bridge-nf-call-iptables I0724 22:09:21.369729 268235 ssh_runner.go:148] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward" I0724 22:09:21.382945 268235 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:09:21.497129 268235 ssh_runner.go:148] Run: sudo systemctl start crio I0724 22:09:22.005914 268235 ssh_runner.go:148] Run: crio --version I0724 22:09:22.153365 268235 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}} I0724 22:09:22.211346 268235 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 0).Gateway}}" d4a420189740 I0724 22:09:22.266467 268235 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1 I0724 22:09:22.266553 268235 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts I0724 22:09:22.270700 268235 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts" I0724 22:09:22.305029 268235 preload.go:97] Checking if preload exists for k8s version v1.15.7 and runtime crio W0724 22:09:22.347970 268235 preload.go:118] 
https://storage.googleapis.com/minikube-preloaded-volume-tarballs/preloaded-images-k8s-v4-v1.15.7-cri-o-overlay-amd64.tar.lz4 status code: 404 I0724 22:09:22.347999 268235 cache_images.go:72] LoadImages start: [k8s.gcr.io/kube-proxy:v1.15.7 k8s.gcr.io/kube-scheduler:v1.15.7 k8s.gcr.io/kube-controller-manager:v1.15.7 k8s.gcr.io/kube-apiserver:v1.15.7 k8s.gcr.io/coredns:1.3.1 k8s.gcr.io/etcd:3.3.10 k8s.gcr.io/pause:3.1 gcr.io/k8s-minikube/storage-provisioner:v1.8.1 kubernetesui/dashboard:v2.0.1 kubernetesui/metrics-scraper:v1.0.4] I0724 22:09:22.348079 268235 image.go:168] retrieving image: k8s.gcr.io/kube-proxy:v1.15.7 I0724 22:09:22.348114 268235 image.go:168] retrieving image: k8s.gcr.io/pause:3.1 I0724 22:09:22.348125 268235 image.go:168] retrieving image: k8s.gcr.io/kube-controller-manager:v1.15.7 I0724 22:09:22.348168 268235 image.go:168] retrieving image: k8s.gcr.io/kube-apiserver:v1.15.7 I0724 22:09:22.348079 268235 image.go:168] retrieving image: kubernetesui/metrics-scraper:v1.0.4 I0724 22:09:22.348298 268235 image.go:168] retrieving image: k8s.gcr.io/kube-scheduler:v1.15.7 I0724 22:09:22.348319 268235 image.go:168] retrieving image: kubernetesui/dashboard:v2.0.1 I0724 22:09:22.348299 268235 image.go:168] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v1.8.1 I0724 22:09:22.348130 268235 image.go:168] retrieving image: k8s.gcr.io/etcd:3.3.10 I0724 22:09:22.348422 268235 image.go:168] retrieving image: k8s.gcr.io/coredns:1.3.1 I0724 22:09:22.349849 268235 image.go:176] daemon lookup for k8s.gcr.io/pause:3.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.349914 268235 image.go:176] daemon lookup for kubernetesui/metrics-scraper:v1.0.4: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.349932 268235 image.go:176] daemon lookup for k8s.gcr.io/coredns:1.3.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350086 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-controller-manager:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350161 268235 image.go:176] daemon lookup for kubernetesui/dashboard:v2.0.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350164 268235 image.go:176] daemon lookup for k8s.gcr.io/etcd:3.3.10: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350196 268235 image.go:176] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v1.8.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350195 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-scheduler:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350221 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-proxy:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40 I0724 22:09:22.350167 268235 image.go:176] daemon lookup for k8s.gcr.io/kube-apiserver:v1.15.7: Error response from daemon: client version 1.41 is too new. 
Maximum supported API version is 1.40 I0724 22:09:22.478778 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-controller-manager:v1.15.7 I0724 22:09:22.503829 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-proxy:v1.15.7 I0724 22:09:22.506722 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/pause:3.1 I0724 22:09:22.506919 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/etcd:3.3.10 I0724 22:09:22.520214 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-apiserver:v1.15.7 I0724 22:09:22.521549 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-scheduler:v1.15.7 I0724 22:09:22.532004 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} gcr.io/k8s-minikube/storage-provisioner:v1.8.1 I0724 22:09:22.560228 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/coredns:1.3.1 I0724 22:09:22.654190 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} kubernetesui/metrics-scraper:v1.0.4 I0724 22:09:22.729987 268235 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} kubernetesui/dashboard:v2.0.1 I0724 22:09:23.263589 268235 cache_images.go:105] "k8s.gcr.io/kube-controller-manager:v1.15.7" needs transfer: "k8s.gcr.io/kube-controller-manager:v1.15.7" does not exist at hash "d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2" in container runtime I0724 22:09:23.263616 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 I0724 22:09:23.263718 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.15.7 I0724 22:09:23.271128 268235 cache_images.go:105] "k8s.gcr.io/pause:3.1" needs transfer: "k8s.gcr.io/pause:3.1" does not exist at hash "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e" in container runtime I0724 22:09:23.271157 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 I0724 22:09:23.271228 268235 cache_images.go:105] "k8s.gcr.io/etcd:3.3.10" needs transfer: "k8s.gcr.io/etcd:3.3.10" does not exist at hash "2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d" in container runtime I0724 22:09:23.271308 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 I0724 22:09:23.271492 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.3.10 I0724 22:09:23.271264 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.1 I0724 22:09:23.337093 268235 cache_images.go:105] "k8s.gcr.io/kube-scheduler:v1.15.7" needs transfer: "k8s.gcr.io/kube-scheduler:v1.15.7" does not exist at hash "78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367" in container runtime I0724 22:09:23.337127 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 I0724 22:09:23.337244 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.15.7 I0724 22:09:23.337395 268235 
cache_images.go:105] "k8s.gcr.io/kube-proxy:v1.15.7" needs transfer: "k8s.gcr.io/kube-proxy:v1.15.7" does not exist at hash "ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f" in container runtime I0724 22:09:23.337417 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 I0724 22:09:23.337528 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.15.7 I0724 22:09:23.337544 268235 cache_images.go:105] "k8s.gcr.io/kube-apiserver:v1.15.7" needs transfer: "k8s.gcr.io/kube-apiserver:v1.15.7" does not exist at hash "c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264" in container runtime I0724 22:09:23.337566 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 I0724 22:09:23.337659 268235 cache_images.go:105] "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" does not exist at hash "4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c" in container runtime I0724 22:09:23.337691 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 I0724 22:09:23.337667 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.15.7 I0724 22:09:23.337804 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v1.8.1 I0724 22:09:23.374041 268235 cache_images.go:105] "k8s.gcr.io/coredns:1.3.1" needs transfer: "k8s.gcr.io/coredns:1.3.1" does not exist at hash "eb516548c180f8a6e0235034ccee2428027896af16a509786da13022fe95fe8c" in container runtime I0724 22:09:23.374127 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 I0724 22:09:23.374144 268235 cache_images.go:105] "kubernetesui/metrics-scraper:v1.0.4" needs transfer: "kubernetesui/metrics-scraper:v1.0.4" does not exist at hash "86262685d9abb35698a4e03ed13f9ded5b97c6c85b466285e4f367e5232eeee4" in container runtime I0724 22:09:23.374170 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 I0724 22:09:23.374209 268235 cache_images.go:105] "kubernetesui/dashboard:v2.0.1" needs transfer: "kubernetesui/dashboard:v2.0.1" does not exist at hash "85d666cddd04329fc0552e50826c7b64ea8a2d66efef04f3ffd8ee3f14c46d01" in container runtime I0724 22:09:23.374227 268235 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 I0724 22:09:23.374264 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_1.3.1 I0724 22:09:23.374296 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/metrics-scraper_v1.0.4 I0724 22:09:23.374327 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/etcd_3.3.10: stat -c "%s %y" /var/lib/minikube/images/etcd_3.3.10: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/etcd_3.3.10': No such file or directory I0724 22:09:23.374332 268235 
ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/dashboard_v2.0.1 I0724 22:09:23.374356 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 --> /var/lib/minikube/images/etcd_3.3.10 (76164608 bytes) I0724 22:09:23.374263 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/kube-controller-manager_v1.15.7: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.15.7: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/kube-controller-manager_v1.15.7': No such file or directory I0724 22:09:23.374379 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 --> /var/lib/minikube/images/kube-controller-manager_v1.15.7 (47866880 bytes) I0724 22:09:23.374391 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/pause_3.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.1: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/pause_3.1': No such file or directory I0724 22:09:23.374411 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 --> /var/lib/minikube/images/pause_3.1 (318976 bytes) I0724 22:09:23.374437 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/kube-scheduler_v1.15.7: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.15.7: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/kube-scheduler_v1.15.7': No such file or directory I0724 22:09:23.374469 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 --> /var/lib/minikube/images/kube-scheduler_v1.15.7 (29872128 bytes) I0724 22:09:23.374520 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/kube-apiserver_v1.15.7: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.15.7: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/kube-apiserver_v1.15.7': No such file or directory I0724 22:09:23.374538 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/kube-proxy_v1.15.7: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.15.7: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/kube-proxy_v1.15.7': No such file or directory I0724 22:09:23.374562 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 --> /var/lib/minikube/images/kube-proxy_v1.15.7 (30116352 bytes) I0724 22:09:23.374540 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 --> /var/lib/minikube/images/kube-apiserver_v1.15.7 (49298432 bytes) I0724 22:09:23.374673 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/storage-provisioner_v1.8.1: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v1.8.1: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/storage-provisioner_v1.8.1': No such file or directory I0724 22:09:23.374700 268235 ssh_runner.go:215] scp 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 --> /var/lib/minikube/images/storage-provisioner_v1.8.1 (20683776 bytes) I0724 22:09:23.381244 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/coredns_1.3.1: stat -c "%s %y" /var/lib/minikube/images/coredns_1.3.1: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/coredns_1.3.1': No such file or directory I0724 22:09:23.381277 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 --> /var/lib/minikube/images/coredns_1.3.1 (12306944 bytes) I0724 22:09:23.382862 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/metrics-scraper_v1.0.4: stat -c "%s %y" /var/lib/minikube/images/metrics-scraper_v1.0.4: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/metrics-scraper_v1.0.4': No such file or directory I0724 22:09:23.382891 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 --> /var/lib/minikube/images/metrics-scraper_v1.0.4 (16022528 bytes) I0724 22:09:23.438163 268235 ssh_runner.go:205] existence check for /var/lib/minikube/images/dashboard_v2.0.1: stat -c "%s %y" /var/lib/minikube/images/dashboard_v2.0.1: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/images/dashboard_v2.0.1': No such file or directory I0724 22:09:23.438294 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 --> /var/lib/minikube/images/dashboard_v2.0.1 (66414592 bytes) I0724 22:09:23.541820 268235 crio.go:152] Loading image: /var/lib/minikube/images/pause_3.1 I0724 22:09:23.541903 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/pause_3.1 I0724 22:09:26.989336 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/pause_3.1: (3.447393808s) I0724 22:09:26.989377 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 from cache I0724 22:09:26.989393 268235 crio.go:152] Loading image: /var/lib/minikube/images/coredns_1.3.1 I0724 22:09:26.989447 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/coredns_1.3.1 I0724 22:09:29.558744 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/coredns_1.3.1: (2.569274171s) I0724 22:09:29.558786 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 from cache I0724 22:09:29.558804 268235 crio.go:152] Loading image: /var/lib/minikube/images/metrics-scraper_v1.0.4 I0724 22:09:29.558850 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/metrics-scraper_v1.0.4 I0724 22:09:32.157394 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/metrics-scraper_v1.0.4: (2.598503704s) I0724 22:09:32.157417 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 from cache I0724 22:09:32.157433 268235 crio.go:152] Loading image: 
/var/lib/minikube/images/storage-provisioner_v1.8.1 I0724 22:09:32.157482 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/storage-provisioner_v1.8.1 I0724 22:09:35.971170 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/storage-provisioner_v1.8.1: (3.813659656s) I0724 22:09:35.971231 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 from cache I0724 22:09:35.971246 268235 crio.go:152] Loading image: /var/lib/minikube/images/kube-proxy_v1.15.7 I0724 22:09:35.971313 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-proxy_v1.15.7 I0724 22:09:40.467649 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-proxy_v1.15.7: (4.496305201s) I0724 22:09:40.467679 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 from cache I0724 22:09:40.467702 268235 crio.go:152] Loading image: /var/lib/minikube/images/kube-scheduler_v1.15.7 I0724 22:09:40.467754 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-scheduler_v1.15.7 I0724 22:09:42.670343 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-scheduler_v1.15.7: (2.20256318s) I0724 22:09:42.670367 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 from cache I0724 22:09:42.670383 268235 crio.go:152] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.15.7 I0724 22:09:42.670430 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-controller-manager_v1.15.7 I0724 22:09:48.388505 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-controller-manager_v1.15.7: (5.718051911s) I0724 22:09:48.388531 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 from cache I0724 22:09:48.388551 268235 crio.go:152] Loading image: /var/lib/minikube/images/kube-apiserver_v1.15.7 I0724 22:09:48.388600 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-apiserver_v1.15.7 I0724 22:09:54.942572 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-apiserver_v1.15.7: (6.5539444s) I0724 22:09:54.942601 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 from cache I0724 22:09:54.942646 268235 crio.go:152] Loading image: /var/lib/minikube/images/dashboard_v2.0.1 I0724 22:09:54.942701 268235 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/dashboard_v2.0.1 I0724 22:10:03.451858 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/dashboard_v2.0.1: (8.509136091s) I0724 22:10:03.451895 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 from cache I0724 22:10:03.451912 268235 crio.go:152] Loading image: /var/lib/minikube/images/etcd_3.3.10 I0724 22:10:03.451982 268235 
ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/etcd_3.3.10
I0724 22:10:13.949158 268235 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/etcd_3.3.10: (10.497151246s)
I0724 22:10:13.949185 268235 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 from cache
I0724 22:10:13.949200 268235 cache_images.go:112] Successfully loaded all cached images
I0724 22:10:13.949206 268235 cache_images.go:76] LoadImages completed in 51.601193846s
I0724 22:10:13.949268 268235 ssh_runner.go:148] Run: crio config
I0724 22:10:14.042388 268235 cni.go:74] Creating CNI manager for ""
I0724 22:10:14.042409 268235 cni.go:105] "docker" driver + crio runtime found, recommending kindnet
I0724 22:10:14.042418 268235 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0724 22:10:14.042435 268235 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.2 APIServerPort:8443 KubernetesVersion:v1.15.7 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:crio-20200724220901-14997 NodeName:crio-20200724220901-14997 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.2"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0724 22:10:14.042589 268235 kubeadm.go:154] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.17.0.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "crio-20200724220901-14997"
  kubeletExtraArgs:
    node-ip: 172.17.0.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "172.17.0.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: crio-20200724220901-14997
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      listen-metrics-urls: http://127.0.0.1:2381,http://172.17.0.2:2381
controllerManager:
  extraArgs:
    "leader-elect": "false"
scheduler:
  extraArgs:
    "leader-elect": "false"
kubernetesVersion: v1.15.7
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 172.17.0.2:10249
I0724 22:10:14.042677 268235 kubeadm.go:790] kubelet
[Unit]
Wants=docker.socket

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.15.7/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --client-ca-file=/var/lib/minikube/certs/ca.crt --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --hostname-override=crio-20200724220901-14997 --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.2 --runtime-request-timeout=15m

[Install]
 config: {KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
I0724 22:10:14.042738 268235 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.15.7
I0724 22:10:14.051830 268235 binaries.go:46] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.15.7: Process exited with status 2 stdout: stderr: ls: cannot access '/var/lib/minikube/binaries/v1.15.7': No such file or directory Initiating transfer...
I0724 22:10:14.051891 268235 ssh_runner.go:148] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.15.7
I0724 22:10:14.060819 268235 download.go:78] Downloading: https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubectl?checksum=file:https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubectl.sha1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/linux/v1.15.7/kubectl
I0724 22:10:14.060821 268235 download.go:78] Downloading: https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubeadm?checksum=file:https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubeadm.sha1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/linux/v1.15.7/kubeadm
I0724 22:10:14.060821 268235 download.go:78] Downloading: https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubelet?checksum=file:https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubelet.sha1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/linux/v1.15.7/kubelet
> kubeadm.sha1: 41 B / 41 B [----------------------------] 100.00% ? p/s 0s
> kubelet.sha1: 41 B / 41 B [----------------------------] 100.00% ? p/s 0s
> kubectl.sha1: 41 B / 41 B [----------------------------] 100.00% ? p/s 0s
> kubeadm: 23.20 MiB / 38.33 MiB [--------------->_________] 60.53% ? p/s ?
> kubelet: 16.00 MiB / 114.16 MiB [--->____________________] 14.02% ? p/s ?
> kubeadm: 38.33 MiB / 38.33 MiB [--------------] 100.00% 254.13 MiB p/s 0s
> kubectl: 15.73 MiB / 41.00 MiB [--------->_______________] 38.37% ? p/s ?
I0724 22:10:14.843554 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.15.7/kubeadm
> kubelet: 40.00 MiB / 114.16 MiB [-------->_______________] 35.04% ? p/s ?
I0724 22:10:14.848491 268235 ssh_runner.go:205] existence check for /var/lib/minikube/binaries/v1.15.7/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.15.7/kubeadm: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/binaries/v1.15.7/kubeadm': No such file or directory
I0724 22:10:14.848528 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/linux/v1.15.7/kubeadm --> /var/lib/minikube/binaries/v1.15.7/kubeadm (40190400 bytes)
> kubectl: 41.00 MiB / 41.00 MiB [--------------] 100.00% 272.97 MiB p/s 0s
I0724 22:10:15.041591 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.15.7/kubectl
> kubelet: 56.00 MiB / 114.16 MiB [----------->____________] 49.06% ? p/s ?
I0724 22:10:15.046000 268235 ssh_runner.go:205] existence check for /var/lib/minikube/binaries/v1.15.7/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.15.7/kubectl: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/binaries/v1.15.7/kubectl': No such file or directory
I0724 22:10:15.047204 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/linux/v1.15.7/kubectl --> /var/lib/minikube/binaries/v1.15.7/kubectl (42993696 bytes)
> kubelet: 80.00 MiB / 114.16 MiB [------->__] 70.08% 106.66 MiB p/s ETA 0s
> kubelet: 98.55 MiB / 114.16 MiB [-------->_] 86.33% 106.66 MiB p/s ETA 0s
> kubelet: 98.55 MiB / 114.16 MiB [-------->_] 86.33% 106.66 MiB p/s ETA 0s
> kubelet: 98.55 MiB / 114.16 MiB [-------->_] 86.33% 101.78 MiB p/s ETA 0s
> kubelet: 108.30 MiB / 114.16 MiB [-------->] 94.87% 101.78 MiB p/s ETA 0s
> kubelet: 114.16 MiB / 114.16 MiB [-------------] 100.00% 80.43 MiB p/s 2s
I0724 22:10:16.266212 268235 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service kubelet
I0724 22:10:16.281463 268235 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.15.7/kubelet
I0724 22:10:16.285758 268235 ssh_runner.go:205] existence check for /var/lib/minikube/binaries/v1.15.7/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.15.7/kubelet: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/lib/minikube/binaries/v1.15.7/kubelet': No such file or directory
I0724 22:10:16.285829 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/linux/v1.15.7/kubelet --> /var/lib/minikube/binaries/v1.15.7/kubelet (119702544 bytes)
I0724 22:10:16.537345 268235 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0724 22:10:16.546341 268235 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (550 bytes)
I0724 22:10:16.567893 268235 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes)
I0724 22:10:16.589024 268235 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1882 bytes)
I0724 22:10:16.609712 268235 ssh_runner.go:148] Run: grep 172.17.0.2 control-plane.minikube.internal$ /etc/hosts
I0724 22:10:16.613514 268235 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:10:16.624531 268235 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:10:16.693327 268235 ssh_runner.go:148] Run: sudo systemctl start kubelet
I0724 22:10:16.711953 268235 certs.go:52]
Setting up /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997 for IP: 172.17.0.2 I0724 22:10:16.712018 268235 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key I0724 22:10:16.712059 268235 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key I0724 22:10:16.712119 268235 certs.go:273] generating minikube-user signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.key I0724 22:10:16.712132 268235 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.crt with IP's: [] I0724 22:10:16.963385 268235 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.crt ... I0724 22:10:16.963417 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.crt: {Name:mk6bcc4223e5f99879473f7c1c5eb968fcf24f74 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:16.963610 268235 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.key ... I0724 22:10:16.963626 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.key: {Name:mkef355cc2adcb1639797c151478fdbd736330e2 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:16.963726 268235 certs.go:273] generating minikube signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key.7b749c5f I0724 22:10:16.963738 268235 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt.7b749c5f with IP's: [172.17.0.2 10.96.0.1 127.0.0.1 10.0.0.1] I0724 22:10:17.276643 268235 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt.7b749c5f ... I0724 22:10:17.276671 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt.7b749c5f: {Name:mkdd328f3d1d9d7a9ed06399ac173df0df618eeb Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:17.276844 268235 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key.7b749c5f ... 
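The certificate generation above (certs.go/crypto.go) is standard-library Go crypto: a CA key signs a leaf certificate whose IP SANs are exactly the addresses printed in the log (node IP 172.17.0.2, service IP 10.96.0.1, loopback, and 10.0.0.1). Below is a minimal self-contained sketch of that step; it is not minikube's actual implementation, and the self-signed CA here merely stands in for the ca.key/ca.crt pair the real code loads from disk.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Stand-in CA; the real code reuses the persisted minikubeCA key pair.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Leaf certificate carrying the IP SANs seen in the log above.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("172.17.0.2"), net.ParseIP("10.96.0.1"),
			net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
		},
	}
	der, _ := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
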
I0724 22:10:17.276860 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key.7b749c5f: {Name:mkd7ee7c85866aa933147375ea01228c434c7416 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:17.276953 268235 certs.go:284] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt.7b749c5f -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt I0724 22:10:17.277015 268235 certs.go:288] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key.7b749c5f -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key I0724 22:10:17.277080 268235 certs.go:273] generating aggregator signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.key I0724 22:10:17.277096 268235 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.crt with IP's: [] I0724 22:10:17.500816 268235 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.crt ... I0724 22:10:17.500845 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.crt: {Name:mkd97dc2f27023353ea8645675a821de3dd2dd60 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:17.501041 268235 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.key ... 
I0724 22:10:17.501060 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.key: {Name:mk6ceabe2b2be1533a1d9865dd17f77151b2c129 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:17.501255 268235 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes) W0724 22:10:17.501300 268235 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes I0724 22:10:17.501315 268235 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes) I0724 22:10:17.501342 268235 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes) I0724 22:10:17.501372 268235 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes) I0724 22:10:17.501402 268235 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes) I0724 22:10:17.502854 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes) I0724 22:10:17.525306 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes) I0724 22:10:17.546279 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes) I0724 22:10:17.566531 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes) I0724 22:10:17.586784 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes) I0724 22:10:17.606764 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes) I0724 22:10:17.630478 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes) I0724 22:10:17.652074 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes) I0724 22:10:17.673968 268235 ssh_runner.go:215] scp 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes) I0724 22:10:17.697416 268235 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes) I0724 22:10:17.718789 268235 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes) I0724 22:10:17.741305 268235 ssh_runner.go:148] Run: openssl version I0724 22:10:17.747812 268235 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0724 22:10:17.757625 268235 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0724 22:10:17.764150 268235 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem I0724 22:10:17.764224 268235 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0724 22:10:17.770974 268235 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0724 22:10:17.780132 268235 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem" I0724 22:10:17.789705 268235 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem I0724 22:10:17.794329 268235 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem I0724 22:10:17.794386 268235 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem I0724 22:10:17.800785 268235 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0" I0724 22:10:17.810355 268235 kubeadm.go:327] StartCluster: {Name:crio-20200724220901-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.2 Port:8443 KubernetesVersion:v1.15.7 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:10:17.810449 268235 cri.go:41] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]} I0724 22:10:17.810495 268235 
ssh_runner.go:148] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system" I0724 22:10:17.828999 268235 cri.go:76] found id: "" I0724 22:10:17.829069 268235 ssh_runner.go:148] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd I0724 22:10:17.838910 268235 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml I0724 22:10:17.849489 268235 kubeadm.go:211] ignoring SystemVerification for kubeadm because of docker driver I0724 22:10:17.849593 268235 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf I0724 22:10:17.859186 268235 kubeadm.go:147] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2 stdout: stderr: ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory I0724 22:10:17.859225 268235 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=SystemVerification --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables" I0724 22:10:35.594028 268235 ssh_runner.go:188] Completed: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=SystemVerification --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": (17.734777493s) I0724 22:10:35.594053 268235 cni.go:74] Creating CNI manager for "" I0724 22:10:35.594059 268235 cni.go:105] "docker" driver + crio runtime found, recommending kindnet I0724 22:10:35.608083 268235 ssh_runner.go:148] Run: stat /opt/cni/bin/portmap I0724 22:10:35.612866 268235 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.15.7/kubectl ... 
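The kubeadm bootstrap above is, in the end, one long shell command: the generated kubeadm.yaml plus a comma-joined list of preflight checks to skip. A hedged sketch of composing and running it with os/exec follows; the flag values come from the log, while the trimmed ignore list and the local (rather than over-SSH) execution are illustrative simplifications.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Preflight checks to skip, abbreviated from the full list in the log above.
	ignores := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
		"Port-10250",
		"Swap",
		"SystemVerification",
	}
	// Same shape as the logged command: version-pinned PATH, generated config,
	// comma-joined ignore list. minikube runs this through its ssh_runner.
	cmd := "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init" +
		" --config /var/tmp/minikube/kubeadm.yaml" +
		" --ignore-preflight-errors=" + strings.Join(ignores, ",")
	out, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
	fmt.Println(string(out))
	if err != nil {
		fmt.Println("kubeadm init failed:", err)
	}
}
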
I0724 22:10:35.612891 268235 ssh_runner.go:215] scp memory --> /var/tmp/minikube/cni.yaml (2285 bytes) I0724 22:10:35.636250 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml I0724 22:10:36.756992 268235 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.15.7/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.120710885s) I0724 22:10:36.757020 268235 crio.go:331] Updating CRIO to use CIDR: "10.244.0.0/16" I0724 22:10:36.757073 268235 ssh_runner.go:148] Run: sudo /bin/bash -c "sed -i -e s#10.88.0.0/16#10.244.0.0/16# -e s#10.88.0.1#10.244.0.1# /etc/cni/net.d/*bridge*" I0724 22:10:36.769332 268235 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:10:36.923583 268235 ssh_runner.go:148] Run: sudo systemctl restart crio I0724 22:10:37.493355 268235 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj" I0724 22:10:37.493505 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:37.493524 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl label nodes minikube.k8s.io/version=v1.12.1 minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf minikube.k8s.io/name=crio-20200724220901-14997 minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:37.505080 268235 ops.go:35] apiserver oom_adj: 16 I0724 22:10:37.505106 268235 ops.go:40] adjusting apiserver oom_adj to -10 I0724 22:10:37.505120 268235 ssh_runner.go:148] Run: /bin/bash -c "echo -10 | sudo tee /proc/$(pgrep kube-apiserver)/oom_adj" I0724 22:10:37.655739 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:38.248845 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:38.748837 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:39.248901 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:39.748829 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:40.248854 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:40.748865 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:41.248867 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:41.748832 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:42.248836 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:43.269156 268235 ssh_runner.go:188] Completed: sudo 
/var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig: (1.020283106s) I0724 22:10:43.748841 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:44.248872 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:44.748839 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:45.248832 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:45.748876 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:46.248863 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:46.748921 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:47.248872 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:47.748871 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:48.876884 268235 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig: (1.127974087s) I0724 22:10:49.248876 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:49.748847 268235 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:10:49.841962 268235 kubeadm.go:866] duration metric: took 12.348530874s to wait for elevateKubeSystemPrivileges. I0724 22:10:49.841992 268235 kubeadm.go:329] StartCluster complete in 32.031644467s I0724 22:10:49.842009 268235 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:49.842101 268235 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig I0724 22:10:49.844613 268235 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:10:49.844817 268235 start.go:195] Will wait wait-timeout for node ... 
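The ops.go entries above show minikube shielding the apiserver from the kernel OOM killer: read /proc/<pid>/oom_adj (16 here) and rewrite it to -10, since lower values make a process a less likely OOM victim. A minimal sketch, assuming root and a running kube-apiserver; the pgrep lookup and error handling are simplified.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Find the apiserver PID the same way the logged shell pipeline does.
	out, err := exec.Command("pgrep", "kube-apiserver").Output()
	if err != nil {
		fmt.Println("no kube-apiserver process:", err)
		return
	}
	pid := strings.TrimSpace(string(out))
	path := fmt.Sprintf("/proc/%s/oom_adj", pid)
	cur, _ := os.ReadFile(path)
	fmt.Printf("apiserver oom_adj: %s", cur)
	// Negative values deprioritize this process for the OOM killer.
	if err := os.WriteFile(path, []byte("-10\n"), 0644); err != nil {
		fmt.Println("adjusting oom_adj requires root:", err)
	}
}
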
I0724 22:10:49.844976 268235 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl scale deployment --replicas=1 coredns -n=kube-system I0724 22:10:49.844892 268235 addons.go:353] enableAddons start: toEnable=map[], additional=[] I0724 22:10:49.871462 268235 addons.go:53] Setting storage-provisioner=true in profile "crio-20200724220901-14997" I0724 22:10:49.871471 268235 addons.go:53] Setting default-storageclass=true in profile "crio-20200724220901-14997" I0724 22:10:49.871492 268235 addons.go:129] Setting addon storage-provisioner=true in "crio-20200724220901-14997" I0724 22:10:49.871493 268235 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "crio-20200724220901-14997" W0724 22:10:49.871499 268235 addons.go:138] addon storage-provisioner should already be in state true I0724 22:10:49.871511 268235 host.go:65] Checking if "crio-20200724220901-14997" exists ... I0724 22:10:49.871901 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:10:49.872063 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:10:49.874935 268235 api_server.go:48] waiting for apiserver process to appear ... I0724 22:10:49.874980 268235 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.* I0724 22:10:49.940819 268235 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:10:49.940843 268235 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes) I0724 22:10:49.940898 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:10:49.960053 268235 addons.go:129] Setting addon default-storageclass=true in "crio-20200724220901-14997" W0724 22:10:49.960082 268235 addons.go:138] addon default-storageclass should already be in state true I0724 22:10:49.960098 268235 host.go:65] Checking if "crio-20200724220901-14997" exists ... I0724 22:10:49.960740 268235 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:10:50.001145 268235 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32888 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker} I0724 22:10:50.005960 268235 api_server.go:68] duration metric: took 161.115697ms to wait for apiserver process to appear ... I0724 22:10:50.005980 268235 api_server.go:84] waiting for apiserver healthz status ... I0724 22:10:50.005989 268235 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ... 
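The healthz wait that begins here is a plain HTTPS GET against the apiserver, repeated until it answers 200 "ok". A sketch assuming Go 1.16+; certificate verification is skipped purely for brevity in this sketch, whereas the real client is built from the cluster's own credentials.

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	// Endpoint taken from the log: node IP 172.17.0.2, apiserver port 8443.
	resp, err := client.Get("https://172.17.0.2:8443/healthz")
	if err != nil {
		fmt.Println("healthz not reachable yet:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("healthz returned %d: %s\n", resp.StatusCode, body)
}
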
I0724 22:10:50.006038 268235 start.go:549] successfully scaled coredns replicas to 1 I0724 22:10:50.022213 268235 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml I0724 22:10:50.022235 268235 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) I0724 22:10:50.022291 268235 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997 I0724 22:10:50.035844 268235 api_server.go:241] https://172.17.0.2:8443/healthz returned 200: ok I0724 22:10:50.036845 268235 api_server.go:137] control plane version: v1.15.7 I0724 22:10:50.036872 268235 api_server.go:127] duration metric: took 30.885346ms to wait for apiserver health ... I0724 22:10:50.036883 268235 system_pods.go:43] waiting for kube-system pods to appear ... I0724 22:10:50.045358 268235 system_pods.go:59] 2 kube-system pods found I0724 22:10:50.045394 268235 system_pods.go:61] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:50.045407 268235 system_pods.go:61] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:50.045415 268235 system_pods.go:74] duration metric: took 8.524793ms to wait for pod list to return data ... I0724 22:10:50.045428 268235 default_sa.go:33] waiting for default service account to be created ... I0724 22:10:50.050280 268235 default_sa.go:44] found service account: "default" I0724 22:10:50.050301 268235 default_sa.go:54] duration metric: took 4.857337ms for default service account to be created ... I0724 22:10:50.050310 268235 system_pods.go:116] waiting for k8s-apps to be running ... 
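The system_pods poll that follows amounts to listing kube-system pods and checking their phases. Here is a sketch using client-go (assuming k8s.io/client-go v0.18+ signatures and the kubeconfig path from the log); it is an approximation, not minikube's actual system_pods.go.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d kube-system pods found\n", len(pods.Items))
	for _, p := range pods.Items {
		if p.Status.Phase != corev1.PodRunning {
			fmt.Printf("%q not ready yet: %s\n", p.Name, p.Status.Phase)
		}
	}
}
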
I0724 22:10:50.053179 268235 system_pods.go:86] 2 kube-system pods found I0724 22:10:50.053214 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:50.053227 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:50.053256 268235 retry.go:30] will retry after 263.082536ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler I0724 22:10:50.080600 268235 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32888 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker} I0724 22:10:50.247112 268235 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:10:50.338221 268235 system_pods.go:86] 3 kube-system pods found I0724 22:10:50.338274 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending I0724 22:10:50.338289 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:50.338310 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:50.338340 268235 retry.go:30] will retry after 381.329545ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler I0724 22:10:50.346873 268235 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml I0724 22:10:50.723103 268235 system_pods.go:86] 3 kube-system pods found I0724 22:10:50.723133 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:50.723143 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:50.723151 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:50.723165 268235 retry.go:30] will retry after 422.765636ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler I0724 22:10:51.150569 268235 system_pods.go:86] 3 kube-system pods found I0724 22:10:51.150606 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" 
[dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:51.150619 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:51.150630 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:51.150644 268235 retry.go:30] will retry after 473.074753ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler I0724 22:10:51.373272 268235 ssh_runner.go:188] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.126110358s) I0724 22:10:51.373368 268235 ssh_runner.go:188] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.026458433s) I0724 22:10:51.401612 268235 addons.go:355] enableAddons completed in 1.556736984s I0724 22:10:51.711711 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:51.711742 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:51.711750 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:51.711759 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:51.711766 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:51.711779 268235 retry.go:30] will retry after 587.352751ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler I0724 22:10:52.360643 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:52.360687 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:52.360699 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:52.360740 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Pending / Ready:ContainersNotReady 
(containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:10:52.360752 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:52.360808 268235 retry.go:30] will retry after 834.206799ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler I0724 22:10:53.198845 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:53.198877 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:53.198887 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:53.198897 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:10:53.198910 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:53.198922 268235 retry.go:30] will retry after 746.553905ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:10:54.054058 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:54.054090 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:54.054103 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:54.054113 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:10:54.054124 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:54.054135 268235 retry.go:30] will retry after 987.362415ms: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:10:55.375927 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:55.375976 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:55.375988 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady 
(containers with unready status: [kindnet-cni]) I0724 22:10:55.375997 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:10:55.376010 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:55.376036 268235 retry.go:30] will retry after 1.189835008s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:10:57.418279 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:57.418314 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:57.418323 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:57.418329 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:10:57.418337 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:57.418348 268235 retry.go:30] will retry after 1.677229867s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:10:59.099190 268235 system_pods.go:86] 4 kube-system pods found I0724 22:10:59.099234 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:59.099243 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:59.099249 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:10:59.099256 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running I0724 22:10:59.099267 268235 retry.go:30] will retry after 2.346016261s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:01.459780 268235 system_pods.go:86] 4 kube-system pods found I0724 22:11:01.459820 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:01.459831 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:11:01.459838 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] 
Running I0724 22:11:01.459847 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:01.459858 268235 retry.go:30] will retry after 3.36678925s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:05.214029 268235 system_pods.go:86] 4 kube-system pods found I0724 22:11:05.214069 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:05.214086 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:11:05.214095 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:05.214108 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:05.214141 268235 retry.go:30] will retry after 3.11822781s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:08.335385 268235 system_pods.go:86] 4 kube-system pods found I0724 22:11:08.335443 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:08.335451 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:11:08.335459 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:08.335470 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:08.335501 268235 retry.go:30] will retry after 4.276119362s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:12.615454 268235 system_pods.go:86] 4 kube-system pods found I0724 22:11:12.615497 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:12.615507 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running I0724 22:11:12.615519 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:12.615530 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / 
Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:12.615545 268235 retry.go:30] will retry after 5.167232101s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:17.786790 268235 system_pods.go:86] 4 kube-system pods found I0724 22:11:17.786854 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:17.786864 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running I0724 22:11:17.786874 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:17.786885 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:17.786900 268235 retry.go:30] will retry after 6.994901864s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:24.786004 268235 system_pods.go:86] 4 kube-system pods found I0724 22:11:24.786044 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:24.786053 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running I0724 22:11:24.786064 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:24.786075 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:24.786097 268235 retry.go:30] will retry after 7.91826225s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:32.708071 268235 system_pods.go:86] 5 kube-system pods found I0724 22:11:32.708120 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:32.708127 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Pending I0724 22:11:32.708135 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:11:32.708141 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:32.708148 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) 
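The retry.go lines threaded through this wait show a jittered, multiplicatively growing backoff: 263ms, 381ms, 422ms, and so on up to 1m7s, until the 6m0s wait budget is exhausted. A generic sketch of that pattern follows; the growth factor and jitter range are illustrative guesses, not minikube's exact constants.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retry runs check until it succeeds or the deadline passes, sleeping a
// jittered, growing delay between attempts, like the retry.go lines above.
func retry(check func() error, initial, deadline time.Duration) error {
	delay := initial
	start := time.Now()
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Since(start) > deadline {
			return fmt.Errorf("timed out: %w", err)
		}
		fmt.Printf("will retry after %v: %v\n", delay, err)
		time.Sleep(delay)
		// Grow ~1.4x per attempt with +/-25% jitter (illustrative values).
		jitter := 0.75 + rand.Float64()*0.5
		delay = time.Duration(float64(delay) * 1.4 * jitter)
	}
}

func main() {
	attempts := 0
	err := retry(func() error {
		attempts++
		if attempts < 4 {
			return errors.New("missing components: kube-dns")
		}
		return nil
	}, 250*time.Millisecond, time.Minute)
	fmt.Println("result:", err)
}
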
I0724 22:11:32.708166 268235 retry.go:30] will retry after 9.953714808s: missing components: kube-dns, etcd, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:42.670111 268235 system_pods.go:86] 7 kube-system pods found I0724 22:11:42.670153 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:42.670163 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running I0724 22:11:42.670173 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running I0724 22:11:42.670182 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Pending I0724 22:11:42.670189 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:42.670197 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Pending I0724 22:11:42.670210 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:42.670231 268235 retry.go:30] will retry after 15.120437328s: missing components: kube-dns, kube-apiserver, kube-controller-manager, kube-scheduler I0724 22:11:57.794841 268235 system_pods.go:86] 8 kube-system pods found I0724 22:11:57.794871 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:57.794878 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running I0724 22:11:57.794887 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:11:57.794893 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running I0724 22:11:57.794900 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running I0724 22:11:57.794905 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running I0724 22:11:57.794910 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running I0724 22:11:57.794917 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:57.794934 268235 retry.go:30] will retry after 14.90607158s: missing components: kube-dns I0724 22:12:24.735231 268235 system_pods.go:86] 8 kube-system pods found I0724 22:12:24.735269 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / 
ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:12:24.735280 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:12:24.735294 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:12:24.735301 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:12:24.735308 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:12:24.735317 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:12:24.735325 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:12:24.735336 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:12:24.735349 268235 retry.go:30] will retry after 18.465989061s: missing components: kube-dns
I0724 22:12:43.206670 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:12:43.206705 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:12:43.206712 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:12:43.206719 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:12:43.206726 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:12:43.206736 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:12:43.206754 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:12:43.206760 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:12:43.206773 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:12:43.206785 268235 retry.go:30] will retry after 25.219510332s: missing components: kube-dns
I0724 22:13:08.430357 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:13:08.430384 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:13:08.430390 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:13:08.430400 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:13:08.430406 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:13:08.430413 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:13:08.430418 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:13:08.430423 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:13:08.430430 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:13:08.430448 268235 retry.go:30] will retry after 35.078569648s: missing components: kube-dns
I0724 22:13:43.514406 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:13:43.514439 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:13:43.514445 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:13:43.514452 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:13:43.514457 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:13:43.514462 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:13:43.514467 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:13:43.514472 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:13:43.514479 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:13:43.514489 268235 retry.go:30] will retry after 50.027701973s: missing components: kube-dns
I0724 22:14:33.546758 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:14:33.546801 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:14:33.546813 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:14:33.546838 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:14:33.546848 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:14:33.546856 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:14:33.546873 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:14:33.546882 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:14:33.546901 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:14:33.546925 268235 retry.go:30] will retry after 47.463338706s: missing components: kube-dns
I0724 22:15:21.014978 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:15:21.015017 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:15:21.015024 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:15:21.015034 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:15:21.015040 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:15:21.015047 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:15:21.015052 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:15:21.015058 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:15:21.015064 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:15:21.015075 268235 retry.go:30] will retry after 53.912476906s: missing components: kube-dns
I0724 22:16:14.931540 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:16:14.931574 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:16:14.931581 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:16:14.931590 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:16:14.931596 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:16:14.931603 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:16:14.931610 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:16:14.931618 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:16:14.931628 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:16:14.931642 268235 retry.go:30] will retry after 1m7.577191067s: missing components: kube-dns
I0724 22:17:22.512659 268235 system_pods.go:86] 8 kube-system pods found
I0724 22:17:22.512795 268235 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:17:22.512809 268235 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:17:22.512835 268235 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:17:22.512846 268235 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:17:22.512857 268235 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [90ac60c8-e9a9-4da8-87ec-cd7cb961454c] Running
I0724 22:17:22.512865 268235 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:17:22.512874 268235 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:17:22.512888 268235 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:17:22.513047 268235 exit.go:58] WithError(failed to start node)=startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from:
goroutine 1 [running]:
runtime/debug.Stack(0x0, 0x0, 0x100000000000000)
        /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d
k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc000c6ce00)
        /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34
k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc000a5a0d0, 0x2, 0xd)
        /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505
github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc000a5a000, 0xd, 0xd, 0x2cd0820, 0xc000a5a000)
        /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc0000429b0)
        /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349
github.com/spf13/cobra.(*Command).Execute(...)
        /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887
k8s.io/minikube/cmd/minikube/cmd.Execute()
        /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c
main.main()
        /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f
W0724 22:17:22.513239 268235 out.go:249] failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns
* X failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns
*
* minikube is exiting due to an error. If the above message is not useful, open an issue:
  - https://github.com/kubernetes/minikube/issues/new/choose
** /stderr **
start_stop_delete_test.go:151: failed starting minikube -first start-. args "./minikube-linux-amd64 start -p crio-20200724220901-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=crio --disable-driver-mounts --extra-config=kubeadm.ignore-preflight-errors=SystemVerification --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.15.7": exit status 70
helpers_test.go:215: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/FirstStart]: docker inspect <======
helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997
helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997:
-- stdout --
[ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 270617, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:09:15.593531886Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0,
"PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": 
"/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" }, { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "0d795cb2f39f80073816031303c0e963a2cb0b36d8d4c2994640addd703a558d", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32888" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32887" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32886" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32885" } ] }, "SandboxKey": "/var/run/docker/netns/0d795cb2f39f", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "94d389b211c871d340ba03bea225a73a99c784b3ef382985b8799306d3d827f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "94d389b211c871d340ba03bea225a73a99c784b3ef382985b8799306d3d827f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/FirstStart FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/FirstStart]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25: (3.268507299s) helpers_test.go:245: TestStartStop/group/crio/serial/FirstStart logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:09:16 UTC, end at Fri 2020-07-24 22:17:23 UTC. 
* Jul 24 22:17:01 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:01.476733717Z" level=info msg="About to add CNI network crio-bridge (type=bridge)"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.342311716Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.342358519Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.342498929Z" level=info msg="About to del CNI network lo (type=loopback)"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.347731209Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b NetNS:/proc/9948/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.347782712Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.347794813Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.432281936Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.24 -j CNI-69faed25f34ff80ba0f1e5df -m comment --comment name: \"crio-bridge\" id: \"5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-69faed25f34ff80ba0f1e5df':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.432570457Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.24 -j CNI-69faed25f34ff80ba0f1e5df -m comment --comment name: \"crio-bridge\" id: \"5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-69faed25f34ff80ba0f1e5df':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:17:03 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:03.432636862Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.24 -j CNI-69faed25f34ff80ba0f1e5df -m comment --comment name: \"crio-bridge\" id: \"5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-69faed25f34ff80ba0f1e5df':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=e766a474-acf3-4c00-b71e-923a8b7c1c90
* Jul 24 22:17:05 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:05.547069295Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=68d16087-1f57-450c-9f4e-d70d4976100f
* Jul 24 22:17:15 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:15.282595395Z" level=info msg="attempting to run pod sandbox with infra container: kube-system/coredns-5d4dd4b4db-9ssg6/POD" id=84a5a935-0259-445b-a2f7-595238256a60
* Jul 24 22:17:15 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:15.463797846Z" level=info msg="About to add CNI network lo (type=loopback)"
* Jul 24 22:17:15 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:15.467829828Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f NetNS:/proc/10120/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:17:15 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:15.467862630Z" level=info msg="About to add CNI network crio-bridge (type=bridge)"
* Jul 24 22:17:15 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:15.483187400Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=ec8d89e9-c4fe-403d-a909-bc53f800071e
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.569881606Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.569937010Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.570056218Z" level=info msg="About to del CNI network lo (type=loopback)"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.574293817Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f NetNS:/proc/10120/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.574349121Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.574363922Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.648216527Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.25 -j CNI-a51536abba4c0d1b9c740281 -m comment --comment name: \"crio-bridge\" id: \"ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-a51536abba4c0d1b9c740281':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.648267131Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.25 -j CNI-a51536abba4c0d1b9c740281 -m comment --comment name: \"crio-bridge\" id: \"ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-a51536abba4c0d1b9c740281':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:17:17 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:17:17.648366738Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.25 -j CNI-a51536abba4c0d1b9c740281 -m comment --comment name: \"crio-bridge\" id: \"ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-a51536abba4c0d1b9c740281':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=84a5a935-0259-445b-a2f7-595238256a60
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* a55b0d3e379aa 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 27 seconds ago Exited storage-provisioner 6 778921bc7c55f
* 9bbdfe2513207 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 2 minutes ago Exited kindnet-cni 5 06b62a10d0ef5
* 2be846355cebe ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 6 minutes ago Running kube-proxy 0 d2b29b0195201
* f609b0012ef6d c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 6 minutes ago Running kube-apiserver 0 8b21701a5eb2f
* 13967ec1cfcb7 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 6 minutes ago Running kube-controller-manager 0 7c74210db0b18
* d0b0ef8c50a19 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 6 minutes ago Running etcd 0 cfde083312c11
* 99dab5d3aff03 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 6 minutes ago Running kube-scheduler 0 1c739655679ee
*
* ==> describe nodes <==
* Name: crio-20200724220901-14997
* Roles: master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=crio-20200724220901-14997
* kubernetes.io/os=linux
* minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
* minikube.k8s.io/name=crio-20200724220901-14997
* minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700
* minikube.k8s.io/version=v1.12.1
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
* node.alpha.kubernetes.io/ttl: 0
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000
* Taints:
* Unschedulable: false
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 24 Jul 2020 22:16:32 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 24 Jul 2020 22:16:32 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 24 Jul 2020 22:16:32 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 24 Jul 2020 22:16:32 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 172.17.0.2
* Hostname: crio-20200724220901-14997
* Capacity:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* Allocatable:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* System Info:
* Machine ID: 00e6fc94f1884decb6d7faf4f3cca9d5
* System UUID: 8677386b-5379-4ccc-90e7-5b585098762e
* Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529
* Kernel Version: 5.4.0-1022-azure
* OS Image: Ubuntu 19.10
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: cri-o://1.17.3
* Kubelet Version: v1.15.7
* Kube-Proxy Version: v1.15.7
* PodCIDR: 10.244.0.0/24
* Non-terminated Pods: (8 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 6m33s
* kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m51s
* kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 6m34s
* kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 5m47s
* kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 5m33s
* kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m34s
* kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 5m44s
* kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m32s
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 750m (4%) 100m (0%)
* memory 120Mi (0%) 220Mi (0%)
* ephemeral-storage 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasSufficientMemory 6m59s (x7 over 6m59s) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 6m59s (x7 over 6m59s) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 6m59s (x7 over 6m59s) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Warning readOnlySysFS 6m32s kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 6m32s kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
*
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [d0b0ef8c50a1909ed50d4cf31a37f18c4119a48612f57acdefe63fce115d5bbc] <==
* 2020-07-24 22:12:56.359915 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (118.904992ms) to execute
* 2020-07-24 22:12:56.359962 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (211.123161ms) to execute
* 2020-07-24 22:13:06.442661 W | wal: sync duration of 1.413105502s, expected less than 1s
* 2020-07-24 22:13:06.690771 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (247.846402ms) to execute
* 2020-07-24 22:13:06.691097 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (1.183999082s) to execute
* 2020-07-24 22:13:06.694425 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (600.61292ms) to execute
* 2020-07-24 22:13:06.694450 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/crio-20200724220901-14997\" " with result "range_response_count:1 size:356" took too long (775.303569ms) to execute
* 2020-07-24 22:13:06.694460 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (453.465816ms) to execute
* 2020-07-24 22:13:06.694555 W | etcdserver: read-only range request "key:\"/registry/daemonsets\" range_end:\"/registry/daemonsett\" count_only:true " with result "range_response_count:0 size:7" took too long (960.560465ms) to execute
* 2020-07-24 22:13:13.069379 W | etcdserver: request "header: lease_revoke:" with result "size:28" took too long (916.45026ms) to execute
* 2020-07-24 22:13:13.069462 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found")
* 2020-07-24 22:13:13.069476 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found")
* 2020-07-24 22:13:13.069536 W | etcdserver: read-only range request "key:\"/registry/apiextensions.k8s.io/customresourcedefinitions\" range_end:\"/registry/apiextensions.k8s.io/customresourcedefinitiont\" count_only:true " with result "range_response_count:0 size:5" took too long (1.559649322s) to execute
* 2020-07-24 22:13:13.796525 W | etcdserver: request "header: lease_revoke:" with result "error:lease not found" took too long (181.88178ms) to execute
* 2020-07-24 22:13:13.796570 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found")
* 2020-07-24 22:13:31.232246 W | etcdserver: read-only range request "key:\"/registry/leases\" range_end:\"/registry/leaset\" count_only:true " with result "range_response_count:0 size:7" took too long (354.963694ms) to execute
* 2020-07-24 22:13:31.232365 W | etcdserver: read-only range request "key:\"/registry/events\" range_end:\"/registry/eventt\" count_only:true " with result "range_response_count:0 size:7" took too long (279.530529ms) to execute
* 2020-07-24 22:14:36.955686 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (181.72613ms) to execute
* 2020-07-24 22:14:45.950243 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (224.564127ms) to execute
* 2020-07-24 22:14:45.950604 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (455.525668ms) to execute
* 2020-07-24 22:14:45.950770 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (296.95691ms) to execute
* 2020-07-24 22:14:46.860832 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (124.556458ms) to execute
* 2020-07-24 22:14:46.860871 W | etcdserver: read-only range request "key:\"/registry/apiregistration.k8s.io/apiservices\" range_end:\"/registry/apiregistration.k8s.io/apiservicet\" count_only:true " with result "range_response_count:0 size:7" took too long (246.55995ms) to execute
* 2020-07-24 22:14:46.860887 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/crio-20200724220901-14997\" " with result "range_response_count:1 size:356" took too long (124.582559ms) to execute
* 2020-07-24 22:14:46.860969 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (619.55938ms) to execute
*
* ==> kernel <==
* 22:17:23 up 44 min, 0 users, load average: 7.41, 10.98, 8.97
* Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [f609b0012ef6d35e24beac43cf9734c27d6548f09da0af9dcdd690856155d5c3] <==
* I0724 22:13:06.694833 1 trace.go:81] Trace[2108251570]: "List etcd3: key=/jobs, resourceVersion=, limit: 500, continue: " (started: 2020-07-24 22:13:06.093334068 +0000 UTC m=+159.350497018) (total time: 601.467477ms):
* Trace[2108251570]: [601.467477ms] [601.467477ms] END
* I0724 22:13:06.694907 1 trace.go:81] Trace[1240683393]: "Get /apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/crio-20200724220901-14997" (started: 2020-07-24 22:13:05.918684921 +0000 UTC m=+159.175847871) (total time: 776.191529ms):
* Trace[1240683393]: [776.141025ms] [776.095222ms] About to write a response
* I0724 22:13:06.694912 1 trace.go:81] Trace[1232300274]: "List /apis/batch/v1/jobs" (started: 2020-07-24 22:13:06.093273563 +0000 UTC m=+159.350436513) (total time: 601.626088ms):
* Trace[1232300274]: [601.583085ms] [601.529581ms] Listing from storage done
* I0724 22:13:08.766673 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:13:08.766832 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:08.766909 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:08.766923 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:08.775255 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:28.766807 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:13:28.766933 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:28.767002 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:28.775030 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:48.766978 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:13:48.767147 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:48.767261 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:13:48.776497 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:14:08.767132 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:14:08.767258 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:14:08.767377 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:14:08.777775 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:14:45.951213 1 trace.go:81] Trace[369453456]: "GuaranteedUpdate etcd3: *v1.Endpoints" (started: 2020-07-24 22:14:45.030377302 +0000 UTC m=+258.287540252) (total time: 920.796269ms):
* Trace[369453456]: [920.769167ms] [919.342375ms] Transaction committed
*
* ==> kube-controller-manager [13967ec1cfcb7e04177fc3a4bb3390ad4c893aba3aa1f453a274a9e947393268] <==
* I0724 22:10:49.553282 1 range_allocator.go:310] Set node crio-20200724220901-14997 PodCIDR to 10.244.0.0/24
* E0724 22:10:49.585268 1 clusterroleaggregation_controller.go:180] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
* E0724 22:10:49.585566 1 clusterroleaggregation_controller.go:180] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again
* I0724 22:10:49.586050 1 event.go:258] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"ae9639a0-2be9-470a-89a1-9f9fc0f539e5", APIVersion:"apps/v1", ResourceVersion:"220", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-6wf4w
* I0724 22:10:49.586086 1 event.go:258] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kindnet", UID:"bc740eee-a735-44a2-b533-64fd88dc756e", APIVersion:"apps/v1", ResourceVersion:"233", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-4qfcd
* I0724 22:10:49.635568 1 controller_utils.go:1036] Caches are synced for attach detach controller
* I0724 22:10:49.724776 1 controller_utils.go:1036] Caches are synced for HPA controller
* I0724 22:10:49.955648 1 controller_utils.go:1036] Caches are synced for job controller
* I0724 22:10:49.971265 1 controller_utils.go:1036] Caches are synced for taint controller
* I0724 22:10:49.971354 1 taint_manager.go:182] Starting NoExecuteTaintManager
* I0724 22:10:49.971368 1 node_lifecycle_controller.go:1189] Initializing eviction metric for zone:
* W0724 22:10:49.971433 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp.
* I0724 22:10:49.971476 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal.
* I0724 22:10:49.971486 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller
* I0724 22:10:50.130631 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller
* I0724 22:10:50.181760 1 controller_utils.go:1036] Caches are synced for disruption controller
* I0724 22:10:50.181988 1 disruption.go:338] Sending events to api server.
* I0724 22:10:50.188393 1 controller_utils.go:1036] Caches are synced for garbage collector controller
* I0724 22:10:50.189975 1 controller_utils.go:1036] Caches are synced for deployment controller
* I0724 22:10:50.193823 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"92d4aefc-f780-4c0c-a563-2ab1476f4694", APIVersion:"apps/v1", ResourceVersion:"340", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-5d4dd4b4db to 1
* I0724 22:10:50.198446 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-5d4dd4b4db", UID:"ab110c4f-65e4-4d3c-987b-c9a844b107b4", APIVersion:"apps/v1", ResourceVersion:"341", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-5d4dd4b4db-9ssg6
* I0724 22:10:50.248567 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:10:50.273968 1 controller_utils.go:1036] Caches are synced for garbage collector controller
* I0724 22:10:50.273996 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
* I0724 22:10:50.275749 1 controller_utils.go:1036] Caches are synced for resource quota controller
*
* ==> kube-proxy [2be846355cebea723ec99bffd78104b575e2937b018759d3884dc0c79bb92ea0] <==
* W0724 22:10:51.541191 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy
* I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier.
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7
* I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:10:51.749315 1 config.go:187] Starting service config controller
* I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller
* I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller
* I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
* I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller
* I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller
*
* ==> kube-scheduler [99dab5d3aff03ed6049a58c4f600d958b17db3a0f7df5c9c68abab533c8b9dfa] <==
* I0724 22:10:28.185753 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory
* W0724 22:10:28.186676 1 authorization.go:47] Authorization is disabled
* W0724 22:10:28.186691 1 authentication.go:55] Authentication is disabled
* I0724 22:10:28.186705 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251
* I0724 22:10:28.187130 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259
* E0724 22:10:31.838038 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:10:31.850274 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
* E0724 22:10:31.851076 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E0724 22:10:31.851166 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:10:31.935670 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
* E0724 22:10:31.937897 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:10:31.938070 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:10:31.940942 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
* E0724 22:10:31.942934 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:10:31.946983 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* E0724 22:10:32.840099 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
* E0724 22:10:32.851310 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
* E0724 22:10:32.936974 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
* E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
* E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
* E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
*
* ==> kubelet <==
* -- Logs begin at Fri 2020-07-24 22:09:16 UTC, end at Fri 2020-07-24 22:17:25 UTC. --
* Jul 24 22:16:33 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:33.282507 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
* Jul 24 22:16:41 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:41.282371 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"
* Jul 24 22:16:44 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:44.282958 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
* Jul 24 22:16:50 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:50.958973 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(55145cea6bd7fdc06e072f3959f4fbd846e2e9c761bd62cc1a2bf789e9de8cd8): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:16:50 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:50.959034 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(55145cea6bd7fdc06e072f3959f4fbd846e2e9c761bd62cc1a2bf789e9de8cd8): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:16:50 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:50.959058 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(55145cea6bd7fdc06e072f3959f4fbd846e2e9c761bd62cc1a2bf789e9de8cd8): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:16:50 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:50.959112 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(55145cea6bd7fdc06e072f3959f4fbd846e2e9c761bd62cc1a2bf789e9de8cd8): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:16:56 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:56.283018 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
* Jul 24 22:16:59 crio-20200724220901-14997 kubelet[2275]: E0724 22:16:59.899827 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"
* Jul 24 22:17:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:03.632555 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:17:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:03.632621 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:17:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:03.632645 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:17:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:03.632698 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5afc4703f4e04f0073115aa73b66da9739eb49bde8eb90e0fc9de14e6461788b): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:17:08 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:08.282867 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
* Jul 24 22:17:13 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:13.282429 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"
* Jul 24 22:17:17 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:17.900941 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:17:17 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:17.901010 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:17:17 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:17.901034 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:17:17 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:17.901103 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ba6f1aa293e4e4a8777d362d69578df4264ecc04114c8b70326071ff38e0cb1f): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:17:23 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:23.282662 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
* Jul 24 22:17:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:24.283535 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"
* Jul 24 22:17:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:24.405771 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/pod3fa5678dc40111ec8016c600a161dae3/crio-8b21701a5eb2f6af152a40dcc25e4c05e30e6a13c60607c3f6613007a489942a: Error finding container 8b21701a5eb2f6af152a40dcc25e4c05e30e6a13c60607c3f6613007a489942a: Status 404 returned error &{%!s(*http.body=&{0xc001334920 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)}
* Jul 24 22:17:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:24.406387 2275 manager.go:1084] Failed to create existing container: /kubepods/besteffort/pod111a1e02ecfd8e373f0c9f774a428541/crio-cfde083312c1168171f556afd9f9d427052d7f2b24bd0882af47a77d42058755: Error finding container cfde083312c1168171f556afd9f9d427052d7f2b24bd0882af47a77d42058755: Status 404 returned error &{%!s(*http.body=&{0xc000ce1180 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)}
* Jul 24 22:17:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:24.407042 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/podd56135b6f61d5db3f635e70693e7224d/crio-1c739655679ee845e51f5144c1084d6dfd219bb19e3eed0336cafc28ace6dde8: Error finding container 1c739655679ee845e51f5144c1084d6dfd219bb19e3eed0336cafc28ace6dde8: Status 404 returned error &{%!s(*http.body=&{0xc000ce1340 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)}
* Jul 24 22:17:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:17:24.408915 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/poda6564eecd841ea73dca56559a73a98cf/crio-7c74210db0b18ed89e977abd92469c112cabeae1e18ff5a27d9bc1e4a2d586b3: Error finding container 7c74210db0b18ed89e977abd92469c112cabeae1e18ff5a27d9bc1e4a2d586b3: Status 404 returned error &{%!s(*http.body=&{0xc0013351c0 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)}
*
* ==> storage-provisioner [a55b0d3e379aa4b7395cd2032ff96463d3a2113d7ea1ed9427947c646ce7905d] <==
* F0724 22:16:59.720661 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:254: (dbg) Done: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running: (2.939993715s)
helpers_test.go:260: non-running pods: coredns-5d4dd4b4db-9ssg6
helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/FirstStart]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997
describe pod coredns-5d4dd4b4db-9ssg6 helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod coredns-5d4dd4b4db-9ssg6: exit status 1 (78.834935ms) ** stderr ** Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found ** /stderr ** helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod coredns-5d4dd4b4db-9ssg6: exit status 1 === RUN TestStartStop/group/crio/serial/DeployApp start_stop_delete_test.go:158: (dbg) Run: kubectl --context crio-20200724220901-14997 create -f testdata/busybox.yaml start_stop_delete_test.go:158: (dbg) TestStartStop/group/crio/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ... helpers_test.go:332: "busybox" [b947226f-0b0e-4916-9b65-c7b70a6e137e] Pending helpers_test.go:332: "busybox" [b947226f-0b0e-4916-9b65-c7b70a6e137e] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) === CONT TestStartStop/group/containerd/serial/FirstStart start_stop_delete_test.go:149: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p containerd-20200724221200-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --apiserver-port=8444 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3: exit status 70 (7m36.437310084s) -- stdout -- * [containerd-20200724221200-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome * Using the docker driver based on user configuration * Starting control plane node containerd-20200724221200-14997 in cluster containerd-20200724221200-14997 * Pulling base image ... * Creating docker container (CPUs=2, Memory=2200MB) ... * Preparing Kubernetes v1.18.3 on containerd 1.3.3-14-g449e9269 ... - opt containerd=/var/run/containerd/containerd.sock * Configuring CNI (Container Networking Interface) ... * Verifying Kubernetes components... * Enabled addons: default-storageclass, storage-provisioner -- /stdout -- ** stderr ** I0724 22:12:01.032851 335425 out.go:188] Setting JSON to false I0724 22:12:01.037997 335425 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":2360,"bootTime":1595626361,"procs":1253,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 22:12:01.038803 335425 start.go:111] virtualization: kvm host I0724 22:12:01.048052 335425 notify.go:125] Checking for updates... 
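The DeployApp step above waits up to 8m0s for pods matching "integration-test=busybox" to become Ready, polling phase transitions (Pending, then ContainersNotReady) via kubectl. A minimal Go sketch of that wait pattern, shelling out to kubectl the way the test helpers appear to; waitForPods and its signature are illustrative, not minikube's actual helpers_test.go API:

```go
// Sketch of a label-based pod wait similar in spirit to helpers_test.go.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForPods polls `kubectl get po` until every pod matching the label
// selector reports phase Running, or the timeout expires.
func waitForPods(kubeContext, namespace, selector string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "--context", kubeContext,
			"get", "po", "-n", namespace, "-l", selector,
			"-o", "jsonpath={.items[*].status.phase}").Output()
		if err == nil {
			phases := strings.Fields(string(out))
			allRunning := len(phases) > 0
			for _, p := range phases {
				if p != "Running" {
					allRunning = false
				}
			}
			if allRunning {
				return nil
			}
		}
		time.Sleep(2 * time.Second) // poll interval is arbitrary here
	}
	return fmt.Errorf("pods %q in %q not Running after %v", selector, namespace, timeout)
}

func main() {
	if err := waitForPods("crio-20200724220901-14997", "default",
		"integration-test=busybox", 8*time.Minute); err != nil {
		fmt.Println(err)
	}
}
```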
I0724 22:12:01.048478 335425 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:12:01.126957 335425 docker.go:87] docker version: linux-19.03.8 I0724 22:12:01.142024 335425 start.go:217] selected driver: docker I0724 22:12:01.142033 335425 start.go:623] validating driver "docker" against <nil> I0724 22:12:01.142050 335425 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error:<nil> Fix: Doc:} I0724 22:12:01.142129 335425 cli_runner.go:109] Run: docker system info --format "{{json .}}" I0724 22:12:01.210821 335425 start_flags.go:223] no existing cluster config was found, will generate one from the flags I0724 22:12:01.211051 335425 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 22:12:01.211077 335425 cni.go:74] Creating CNI manager for "" I0724 22:12:01.211082 335425 cni.go:105] "docker" driver + containerd runtime found, recommending kindnet I0724 22:12:01.211099 335425 start_flags.go:340] Found "CNI" CNI - setting NetworkPlugin=cni I0724 22:12:01.211106 335425 start_flags.go:345] config: {Name:containerd-20200724221200-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[containerd=/var/run/containerd/containerd.sock] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:12:01.284258 335425 cache.go:117] Beginning downloading kic base image for docker with containerd I0724 22:12:01.295472 335425 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime containerd I0724 22:12:01.295521 335425 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 I0724 22:12:01.295534 335425 cache.go:51] Caching tarball of preloaded images I0724 22:12:01.295546 335425 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 in cache, skipping download I0724 22:12:01.295556 335425 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on containerd I0724 22:12:01.295682 335425 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 22:12:01.295722 335425 image.go:140] Writing local/kicbase:-snapshot to local daemon I0724 22:12:01.295827 335425 profile.go:150] Saving config to
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/config.json ... I0724 22:12:01.295923 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/config.json: {Name:mk78b002dd42f3dee71cf7d46c93c8fe3fa103f4 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:01.690137 335425 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found I0724 22:12:01.690203 335425 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:12:01.690209 335425 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:12:06.303173 335425 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 ! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image I0724 22:12:06.303231 335425 cache.go:178] Successfully downloaded all kic artifacts I0724 22:12:06.303274 335425 start.go:241] acquiring machines lock for containerd-20200724221200-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 22:12:06.303527 335425 start.go:245] acquired machines lock for "containerd-20200724221200-14997" in 227.916µs I0724 22:12:06.303560 335425 start.go:85] Provisioning new machine with config: &{Name:containerd-20200724221200-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[containerd=/var/run/containerd/containerd.sock] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8444 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true} I0724 22:12:06.303637 335425 start.go:122] createHost starting for "" (driver="docker") I0724 22:12:06.400475 335425 start.go:158] libmachine.API.Create for "containerd-20200724221200-14997" (driver="docker") I0724 22:12:06.400515 335425 client.go:161] LocalClient.Create starting I0724 22:12:06.400550 335425 main.go:115] libmachine: 
Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 22:12:06.400595 335425 main.go:115] libmachine: Decoding PEM data... I0724 22:12:06.400618 335425 main.go:115] libmachine: Parsing certificate... I0724 22:12:06.400750 335425 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 22:12:06.400779 335425 main.go:115] libmachine: Decoding PEM data... I0724 22:12:06.400792 335425 main.go:115] libmachine: Parsing certificate... I0724 22:12:06.401766 335425 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 22:12:06.487295 335425 cli_runner.go:109] Run: docker volume create containerd-20200724221200-14997 --label name.minikube.sigs.k8s.io=containerd-20200724221200-14997 --label created_by.minikube.sigs.k8s.io=true I0724 22:12:06.587490 335425 oci.go:101] Successfully created a docker volume containerd-20200724221200-14997 I0724 22:12:06.587590 335425 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v containerd-20200724221200-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib I0724 22:12:08.661354 335425 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/test -v containerd-20200724221200-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib: (2.073724043s) I0724 22:12:08.661380 335425 oci.go:105] Successfully prepared a docker volume containerd-20200724221200-14997 W0724 22:12:08.661418 335425 oci.go:165] Your kernel does not support swap limit capabilities or the cgroup is not mounted. I0724 22:12:08.661979 335425 cli_runner.go:109] Run: docker info --format "'{{json .SecurityOptions}}'" I0724 22:12:08.661443 335425 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime containerd I0724 22:12:08.662401 335425 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 I0724 22:12:08.662421 335425 kic.go:133] Starting extracting preloaded images to volume ... 
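Just above, minikube validates the freshly created docker volume by running a throwaway container with the volume mounted at /var and letting `/usr/bin/test -d /var/lib` decide the verdict. A small Go sketch of that probe; volumeIsUsable is an illustrative name, not the oci package's actual API:

```go
// Probe a docker volume the way the log shows: the exit code of
// `test -d /var/lib` inside a --rm container doubles as the result.
package main

import (
	"fmt"
	"os/exec"
)

func volumeIsUsable(volume, baseImage string) bool {
	cmd := exec.Command("docker", "run", "--rm",
		"--entrypoint", "/usr/bin/test",
		"-v", volume+":/var", baseImage, "-d", "/var/lib")
	// test(1) exits 0 when /var/lib exists, so Run's error is the verdict.
	return cmd.Run() == nil
}

func main() {
	ok := volumeIsUsable("containerd-20200724221200-14997", "kicbase/stable:v0.0.10")
	fmt.Println("volume usable:", ok)
}
```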
I0724 22:12:08.662566 335425 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v containerd-20200724221200-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir I0724 22:12:08.756584 335425 cli_runner.go:109] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname containerd-20200724221200-14997 --name containerd-20200724221200-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=containerd-20200724221200-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=containerd-20200724221200-14997 --volume containerd-20200724221200-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=2200mb -e container=docker --expose 8444 --publish=127.0.0.1::8444 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 I0724 22:12:10.454954 335425 cli_runner.go:151] Completed: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname containerd-20200724221200-14997 --name containerd-20200724221200-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=containerd-20200724221200-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=containerd-20200724221200-14997 --volume containerd-20200724221200-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=2200mb -e container=docker --expose 8444 --publish=127.0.0.1::8444 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438: (1.698273452s) I0724 22:12:10.455036 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Running}} I0724 22:12:10.517908 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}} I0724 22:12:10.573920 335425 cli_runner.go:109] Run: docker exec containerd-20200724221200-14997 stat /var/lib/dpkg/alternatives/iptables I0724 22:12:24.827654 335425 cli_runner.go:151] Completed: docker exec containerd-20200724221200-14997 stat /var/lib/dpkg/alternatives/iptables: (14.253687431s) I0724 22:12:24.827682 335425 oci.go:222] the created container "containerd-20200724221200-14997" has a running status. I0724 22:12:24.827693 335425 kic.go:157] Creating ssh key for kic: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa... 
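The "Creating ssh key for kic" step above writes an RSA key pair whose public half is then pushed into the container as /home/docker/.ssh/authorized_keys (381 bytes, per the next entry). A self-contained sketch of such a step using golang.org/x/crypto/ssh; the output path and 2048-bit size are assumptions, not necessarily what kic.go uses:

```go
// Generate an RSA key pair: PEM private key plus an authorized_keys-style
// public key ("ssh-rsa AAAA..." line).
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func writeKeyPair(privPath string) error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	if err := os.WriteFile(privPath, privPEM, 0600); err != nil {
		return err
	}
	pub, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		return err
	}
	// MarshalAuthorizedKey emits the single-line format authorized_keys expects.
	return os.WriteFile(privPath+".pub", ssh.MarshalAuthorizedKey(pub), 0644)
}

func main() {
	if err := writeKeyPair("id_rsa"); err != nil {
		log.Fatal(err)
	}
}
```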
I0724 22:12:25.088694 335425 kic_runner.go:179] docker (temp): /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) I0724 22:12:25.401102 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}} I0724 22:12:25.511207 335425 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys I0724 22:12:25.511230 335425 kic_runner.go:114] Args: [docker exec --privileged containerd-20200724221200-14997 chown docker:docker /home/docker/.ssh/authorized_keys] I0724 22:12:25.663707 335425 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v containerd-20200724221200-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir: (17.001088436s) I0724 22:12:25.663742 335425 kic.go:138] duration metric: took 17.001319 seconds to extract preloaded images to volume I0724 22:12:25.663882 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}} I0724 22:12:25.718827 335425 machine.go:88] provisioning docker machine ... I0724 22:12:25.718861 335425 ubuntu.go:166] provisioning hostname "containerd-20200724221200-14997" I0724 22:12:25.718938 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:12:25.779516 335425 main.go:115] libmachine: Using SSH client type: native I0724 22:12:25.779834 335425 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32900 <nil> <nil>} I0724 22:12:25.779871 335425 main.go:115] libmachine: About to run SSH command: sudo hostname containerd-20200724221200-14997 && echo "containerd-20200724221200-14997" | sudo tee /etc/hostname I0724 22:12:25.780539 335425 main.go:115] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:57438->127.0.0.1:32900: read: connection reset by peer I0724 22:12:28.947212 335425 main.go:115] libmachine: SSH cmd err, output: <nil>: containerd-20200724221200-14997 I0724 22:12:28.948088 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:12:29.011017 335425 main.go:115] libmachine: Using SSH client type: native I0724 22:12:29.011190 335425 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32900 <nil> <nil>} I0724 22:12:29.011218 335425 main.go:115] libmachine: About to run SSH command: if !
grep -xq '.*\scontainerd-20200724221200-14997' /etc/hosts; then if grep -xq '127.0.1.1\s.*' /etc/hosts; then sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 containerd-20200724221200-14997/g' /etc/hosts; else echo '127.0.1.1 containerd-20200724221200-14997' | sudo tee -a /etc/hosts; fi fi I0724 22:12:29.140066 335425 main.go:115] libmachine: SSH cmd err, output: <nil>: I0724 22:12:29.140108 335425 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube} I0724 22:12:29.140136 335425 ubuntu.go:174] setting up certificates I0724 22:12:29.140146 335425 provision.go:82] configureAuth start I0724 22:12:29.140217 335425 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" containerd-20200724221200-14997 I0724 22:12:29.200869 335425 provision.go:131] copyHostCerts I0724 22:12:29.200936 335425 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ... I0724 22:12:29.200995 335425 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes) I0724 22:12:29.201070 335425 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ... I0724 22:12:29.201101 335425 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes) I0724 22:12:29.201157 335425 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ...
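The SSH command at the top of this excerpt pins the node's hostname in /etc/hosts idempotently: rewrite an existing 127.0.1.1 entry if one is present, otherwise append one. A sketch that renders equivalent shell text from a hostname; the shell fragment is taken from the log, while the Go wrapper (hostsCommand) is purely illustrative:

```go
// Render the idempotent /etc/hosts hostname-pinning command seen in the log.
package main

import "fmt"

func hostsCommand(name string) string {
	return fmt.Sprintf(
		`if ! grep -xq '.*\s%s' /etc/hosts; then `+
			`if grep -xq '127.0.1.1\s.*' /etc/hosts; then `+
			`sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 %s/g' /etc/hosts; `+
			`else echo '127.0.1.1 %s' | sudo tee -a /etc/hosts; fi; fi`,
		name, name, name)
}

func main() {
	fmt.Println(hostsCommand("containerd-20200724221200-14997"))
}
```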
I0724 22:12:29.201185 335425 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes) I0724 22:12:29.201223 335425 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.containerd-20200724221200-14997 san=[172.17.0.5 localhost 127.0.0.1] I0724 22:12:29.336304 335425 provision.go:159] copyRemoteCerts I0724 22:12:29.340453 335425 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker I0724 22:12:29.340535 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:12:29.409442 335425 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32900 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:12:29.505250 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes) I0724 22:12:29.537088 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes) I0724 22:12:29.563099 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1151 bytes) I0724 22:12:29.585082 335425 provision.go:85] duration metric: configureAuth took 444.914411ms I0724 22:12:29.585105 335425 ubuntu.go:190] setting minikube options for container-runtime I0724 22:12:29.585243 335425 machine.go:91] provisioned docker machine in 3.866393122s I0724 22:12:29.585257 335425 client.go:164] LocalClient.Create took 23.184737063s I0724 22:12:29.585275 335425 start.go:163] duration metric: libmachine.API.Create for "containerd-20200724221200-14997" took 23.184806168s I0724 22:12:29.585287 335425 start.go:204] post-start starting for "containerd-20200724221200-14997" (driver="docker") I0724 22:12:29.585292 335425 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] I0724 22:12:29.585344 335425 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs I0724 22:12:29.585390 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:12:29.645036 335425 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32900 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:12:29.738558 335425 ssh_runner.go:148] Run: cat /etc/os-release I0724 22:12:29.742096 335425 
main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found I0724 22:12:29.742139 335425 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found I0724 22:12:29.742150 335425 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found I0724 22:12:29.742161 335425 info.go:98] Remote host: Ubuntu 19.10 I0724 22:12:29.742170 335425 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets ... I0724 22:12:29.742217 335425 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ... I0724 22:12:29.742402 335425 filesync.go:141] local asset: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts -> hosts in /etc/test/nested/copy/14997 I0724 22:12:29.742467 335425 ssh_runner.go:148] Run: sudo mkdir -p /etc/test/nested/copy/14997 I0724 22:12:29.754479 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts --> /etc/test/nested/copy/14997/hosts (40 bytes) I0724 22:12:29.782521 335425 start.go:207] post-start completed in 197.222319ms I0724 22:12:29.782868 335425 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" containerd-20200724221200-14997 I0724 22:12:29.838049 335425 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/config.json ... I0724 22:12:29.838284 335425 start.go:125] duration metric: createHost completed in 23.534634821s I0724 22:12:29.838309 335425 start.go:76] releasing machines lock for "containerd-20200724221200-14997", held for 23.53476573s I0724 22:12:29.838415 335425 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" containerd-20200724221200-14997 I0724 22:12:29.897381 335425 ssh_runner.go:148] Run: systemctl --version I0724 22:12:29.897441 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:12:29.897470 335425 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/ I0724 22:12:29.897548 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:12:29.975054 335425 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32900 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:12:29.980463 335425 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32900 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:12:30.129300 335425 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service crio I0724 22:12:30.144262 335425 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service docker I0724 22:12:30.160669 335425 ssh_runner.go:148] Run: sudo systemctl stop -f docker I0724 22:12:30.186276 335425 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service docker I0724 22:12:30.198273 335425 
ssh_runner.go:148] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock image-endpoint: unix:///run/containerd/containerd.sock " | sudo tee /etc/crictl.yaml" I0724 22:12:30.243824 335425 ssh_runner.go:148] Run: /bin/bash -c "sudo mkdir -p /etc/containerd && printf %s "cm9vdCA9ICIvdmFyL2xpYi9jb250YWluZXJkIgpzdGF0ZSA9ICIvcnVuL2NvbnRhaW5lcmQiCm9vbV9zY29yZSA9IDAKCltncnBjXQogIGFkZHJlc3MgPSAiL3J1bi9jb250YWluZXJkL2NvbnRhaW5lcmQuc29jayIKICB1aWQgPSAwCiAgZ2lkID0gMAogIG1heF9yZWN2X21lc3NhZ2Vfc2l6ZSA9IDE2Nzc3MjE2CiAgbWF4X3NlbmRfbWVzc2FnZV9zaXplID0gMTY3NzcyMTYKCltkZWJ1Z10KICBhZGRyZXNzID0gIiIKICB1aWQgPSAwCiAgZ2lkID0gMAogIGxldmVsID0gIiIKClttZXRyaWNzXQogIGFkZHJlc3MgPSAiIgogIGdycGNfaGlzdG9ncmFtID0gZmFsc2UKCltjZ3JvdXBdCiAgcGF0aCA9ICIiCgpbcGx1Z2luc10KICBbcGx1Z2lucy5jZ3JvdXBzXQogICAgbm9fcHJvbWV0aGV1cyA9IGZhbHNlCiAgW3BsdWdpbnMuY3JpXQogICAgc3RyZWFtX3NlcnZlcl9hZGRyZXNzID0gIiIKICAgIHN0cmVhbV9zZXJ2ZXJfcG9ydCA9ICIxMDAxMCIKICAgIGVuYWJsZV9zZWxpbnV4ID0gZmFsc2UKICAgIHNhbmRib3hfaW1hZ2UgPSAiazhzLmdjci5pby9wYXVzZTozLjIiCiAgICBzdGF0c19jb2xsZWN0X3BlcmlvZCA9IDEwCiAgICBzeXN0ZW1kX2Nncm91cCA9IGZhbHNlCiAgICBlbmFibGVfdGxzX3N0cmVhbWluZyA9IGZhbHNlCiAgICBtYXhfY29udGFpbmVyX2xvZ19saW5lX3NpemUgPSAxNjM4NAogICAgW3BsdWdpbnMuY3JpLmNvbnRhaW5lcmRdCiAgICAgIHNuYXBzaG90dGVyID0gIm92ZXJsYXlmcyIKICAgICAgbm9fcGl2b3QgPSB0cnVlCiAgICAgIFtwbHVnaW5zLmNyaS5jb250YWluZXJkLmRlZmF1bHRfcnVudGltZV0KICAgICAgICBydW50aW1lX3R5cGUgPSAiaW8uY29udGFpbmVyZC5ydW50aW1lLnYxLmxpbnV4IgogICAgICAgIHJ1bnRpbWVfZW5naW5lID0gIiIKICAgICAgICBydW50aW1lX3Jvb3QgPSAiIgogICAgICBbcGx1Z2lucy5jcmkuY29udGFpbmVyZC51bnRydXN0ZWRfd29ya2xvYWRfcnVudGltZV0KICAgICAgICBydW50aW1lX3R5cGUgPSAiIgogICAgICAgIHJ1bnRpbWVfZW5naW5lID0gIiIKICAgICAgICBydW50aW1lX3Jvb3QgPSAiIgogICAgW3BsdWdpbnMuY3JpLmNuaV0KICAgICAgYmluX2RpciA9ICIvb3B0L2NuaS9iaW4iCiAgICAgIGNvbmZfZGlyID0gIi9ldGMvY25pL25ldC5kIgogICAgICBjb25mX3RlbXBsYXRlID0gIiIKICAgIFtwbHVnaW5zLmNyaS5yZWdpc3RyeV0KICAgICAgW3BsdWdpbnMuY3JpLnJlZ2lzdHJ5Lm1pcnJvcnNdCiAgICAgICAgW3BsdWdpbnMuY3JpLnJlZ2lzdHJ5Lm1pcnJvcnMuImRvY2tlci5pbyJdCiAgICAgICAgICBlbmRwb2ludCA9IFsiaHR0cHM6Ly9yZWdpc3RyeS0xLmRvY2tlci5pbyJdCiAgW3BsdWdpbnMuZGlmZi1zZXJ2aWNlXQogICAgZGVmYXVsdCA9IFsid2Fsa2luZyJdCiAgW3BsdWdpbnMubGludXhdCiAgICBzaGltID0gImNvbnRhaW5lcmQtc2hpbSIKICAgIHJ1bnRpbWUgPSAicnVuYyIKICAgIHJ1bnRpbWVfcm9vdCA9ICIiCiAgICBub19zaGltID0gZmFsc2UKICAgIHNoaW1fZGVidWcgPSBmYWxzZQogIFtwbHVnaW5zLnNjaGVkdWxlcl0KICAgIHBhdXNlX3RocmVzaG9sZCA9IDAuMDIKICAgIGRlbGV0aW9uX3RocmVzaG9sZCA9IDAKICAgIG11dGF0aW9uX3RocmVzaG9sZCA9IDEwMAogICAgc2NoZWR1bGVfZGVsYXkgPSAiMHMiCiAgICBzdGFydHVwX2RlbGF5ID0gIjEwMG1zIgo=" | base64 -d | sudo tee /etc/containerd/config.toml" I0724 22:12:30.298412 335425 ssh_runner.go:148] Run: sudo sysctl net.bridge.bridge-nf-call-iptables I0724 22:12:30.308238 335425 ssh_runner.go:148] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward" I0724 22:12:30.319806 335425 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:12:30.466266 335425 ssh_runner.go:148] Run: sudo systemctl restart containerd I0724 22:12:30.501399 335425 ssh_runner.go:148] Run: containerd --version I0724 22:12:30.598699 335425 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}} I0724 22:12:30.669130 335425 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 0).Gateway}}" d4a420189740 I0724 22:12:30.735762 335425 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1 I0724 22:12:30.735839 335425 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts I0724 
22:12:30.741002 335425 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts" I0724 22:12:30.755088 335425 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime containerd I0724 22:12:30.755154 335425 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 I0724 22:12:30.755233 335425 ssh_runner.go:148] Run: sudo crictl images --output json I0724 22:12:30.779006 335425 containerd.go:412] all images are preloaded for containerd runtime. I0724 22:12:30.779033 335425 containerd.go:322] Images already preloaded, skipping extraction I0724 22:12:30.779090 335425 ssh_runner.go:148] Run: sudo crictl images --output json I0724 22:12:30.796909 335425 containerd.go:412] all images are preloaded for containerd runtime. I0724 22:12:30.796933 335425 cache_images.go:69] Images are preloaded, skipping loading I0724 22:12:30.796985 335425 ssh_runner.go:148] Run: sudo crictl info I0724 22:12:30.842487 335425 cni.go:74] Creating CNI manager for "" I0724 22:12:30.842512 335425 cni.go:105] "docker" driver + containerd runtime found, recommending kindnet I0724 22:12:30.842521 335425 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16 I0724 22:12:30.842537 335425 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.5 APIServerPort:8444 KubernetesVersion:v1.18.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:containerd-20200724221200-14997 NodeName:containerd-20200724221200-14997 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.5"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.5 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]} I0724 22:12:30.842685 335425 kubeadm.go:154] kubeadm config: apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration localAPIEndpoint: advertiseAddress: 172.17.0.5 bindPort: 8444 bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token ttl: 24h0m0s usages: - signing - authentication nodeRegistration: criSocket: /run/containerd/containerd.sock name: "containerd-20200724221200-14997" kubeletExtraArgs: node-ip: 172.17.0.5 taints: [] --- apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration apiServer: certSANs: ["127.0.0.1", "localhost", "172.17.0.5"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8444 dns: type: CoreDNS etcd: local: dataDir: /var/lib/minikube/etcd controllerManager: extraArgs: "leader-elect": "false" scheduler: extraArgs: "leader-elect": "false" kubernetesVersion: v1.18.3 networking: dnsDomain: cluster.local 
podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: cgroupfs clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" imagefs.available: "0%" failSwapOn: false staticPodPath: /etc/kubernetes/manifests --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration clusterCIDR: "10.244.0.0/16" metricsBindAddress: 172.17.0.5:10249 I0724 22:12:30.842783 335425 kubeadm.go:790] kubelet [Unit] Wants=containerd.service [Service] ExecStart= ExecStart=/var/lib/minikube/binaries/v1.18.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=containerd-20200724221200-14997 --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.5 --runtime-request-timeout=15m [Install] config: {KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} I0724 22:12:30.842859 335425 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.18.3 I0724 22:12:30.851570 335425 binaries.go:43] Found k8s binaries, skipping transfer I0724 22:12:30.851633 335425 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube I0724 22:12:30.861028 335425 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (543 bytes) I0724 22:12:30.884198 335425 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes) I0724 22:12:30.907678 335425 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1786 bytes) I0724 22:12:30.929527 335425 ssh_runner.go:148] Run: grep 172.17.0.5 control-plane.minikube.internal$ /etc/hosts I0724 22:12:30.933540 335425 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.5 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts" I0724 22:12:30.949283 335425 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:12:31.183848 335425 ssh_runner.go:148] Run: sudo systemctl start kubelet I0724 22:12:31.385199 335425 certs.go:52] Setting up /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997 for IP: 172.17.0.5 I0724 22:12:31.385271 335425 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key I0724 22:12:31.385297 335425 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key I0724 22:12:31.385385 335425 certs.go:273] generating minikube-user signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.key I0724 
22:12:31.385403 335425 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.crt with IP's: [] I0724 22:12:31.700269 335425 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.crt ... I0724 22:12:31.700303 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.crt: {Name:mk2bff12047f6d4d9f49f0d3456a70f81cf50827 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:31.700506 335425 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.key ... I0724 22:12:31.700524 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.key: {Name:mk4ef880c3b23aaebd4e2611bff32153c9f557db Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:31.700623 335425 certs.go:273] generating minikube signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.38ada39b I0724 22:12:31.700634 335425 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.38ada39b with IP's: [172.17.0.5 10.96.0.1 127.0.0.1 10.0.0.1] I0724 22:12:31.849386 335425 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.38ada39b ... I0724 22:12:31.849421 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.38ada39b: {Name:mk5420e5ff15d9315664fe37eec3d68c6e01226b Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:31.849612 335425 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.38ada39b ... 
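The crypto.go entries above generate the apiserver serving certificate with IP SANs [172.17.0.5 10.96.0.1 127.0.0.1 10.0.0.1] (node IP, service ClusterIP, and loopback). A rough crypto/x509 sketch of issuing such a cert: the IPAddresses field of the template becomes the SAN list. The throwaway self-signed CA and all names here are illustrative, not minikube's certs.go:

```go
// Issue a serving cert whose IPAddresses become the SAN entries.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"net"
	"time"
)

func signServingCert(ca *x509.Certificate, caKey *rsa.PrivateKey) ([]byte, *rsa.PrivateKey, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// These IPs end up as the certificate's SANs.
		IPAddresses: []net.IP{
			net.ParseIP("172.17.0.5"), net.ParseIP("10.96.0.1"),
			net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
		},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, ca, &key.PublicKey, caKey)
	return der, key, err
}

func main() {
	// Throwaway self-signed CA, stand-in for the minikubeCA loaded from disk.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	ca, _ := x509.ParseCertificate(caDER)
	if _, _, err := signServingCert(ca, caKey); err != nil {
		log.Fatal(err)
	}
	fmt.Println("issued serving cert with 4 IP SANs")
}
```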
I0724 22:12:31.849635 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.38ada39b: {Name:mk5cfad92a7be721bb1aed7dc19291c09328a943 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:31.849744 335425 certs.go:284] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.38ada39b -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt I0724 22:12:31.849828 335425 certs.go:288] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.38ada39b -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key I0724 22:12:31.849893 335425 certs.go:273] generating aggregator signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.key I0724 22:12:31.849907 335425 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.crt with IP's: [] I0724 22:12:31.947964 335425 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.crt ... I0724 22:12:31.947993 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.crt: {Name:mkb99e549a360f78bf59148bac97a7a72f90d23f Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:31.948164 335425 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.key ... 
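Each "lock.go:35] WriteFile acquiring ..." entry above carries Delay:500ms Timeout:1m0s, so config and key writes appear to be serialized through a named lock with bounded retry. A minimal sketch of that pattern using an O_EXCL lockfile; minikube's actual lock.go uses a different mechanism, this only mirrors the delay/timeout shape visible in the log:

```go
// Write a file under a lock: retry every 500ms, give up after one minute.
package main

import (
	"fmt"
	"os"
	"time"
)

func writeFileLocked(path string, data []byte) error {
	lock := path + ".lock"
	deadline := time.Now().Add(time.Minute)
	for {
		// O_EXCL makes creation fail while another writer holds the lock.
		f, err := os.OpenFile(lock, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
		if err == nil {
			f.Close()
			defer os.Remove(lock) // release after the write completes
			return os.WriteFile(path, data, 0644)
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out acquiring %s", lock)
		}
		time.Sleep(500 * time.Millisecond)
	}
}

func main() {
	if err := writeFileLocked("config.json", []byte("{}\n")); err != nil {
		fmt.Println(err)
	}
}
```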
I0724 22:12:31.948181 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.key: {Name:mk61a804789511394866069b73ee58f43d3f7c9b Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:12:31.948519 335425 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes) W0724 22:12:31.948572 335425 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes I0724 22:12:31.948589 335425 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes) I0724 22:12:31.948617 335425 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes) I0724 22:12:31.948648 335425 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes) I0724 22:12:31.948677 335425 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes) I0724 22:12:31.949509 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes) I0724 22:12:31.972407 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes) I0724 22:12:31.993400 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes) I0724 22:12:32.014895 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes) I0724 22:12:32.035387 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes) I0724 22:12:32.057505 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes) I0724 22:12:32.078774 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes) I0724 22:12:32.100999 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes) I0724 22:12:32.124005 335425 ssh_runner.go:215] scp 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes) I0724 22:12:32.146551 335425 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes) I0724 22:12:32.175394 335425 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes) I0724 22:12:32.197444 335425 ssh_runner.go:148] Run: openssl version I0724 22:12:32.203327 335425 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem" I0724 22:12:32.212643 335425 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem I0724 22:12:32.216543 335425 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem I0724 22:12:32.216593 335425 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem I0724 22:12:32.222464 335425 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0" I0724 22:12:32.231090 335425 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0724 22:12:32.239521 335425 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0724 22:12:32.243777 335425 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem I0724 22:12:32.243825 335425 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0724 22:12:32.249538 335425 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0724 22:12:32.258333 335425 kubeadm.go:327] StartCluster: {Name:containerd-20200724221200-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[containerd=/var/run/containerd/containerd.sock] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} Nodes:[{Name: IP:172.17.0.5 Port:8444 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:12:32.258392 335425 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]} I0724 
I0724 22:12:32.258435 335425 ssh_runner.go:148] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0724 22:12:32.275662 335425 cri.go:76] found id: ""
I0724 22:12:32.275726 335425 ssh_runner.go:148] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0724 22:12:32.283818 335425 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0724 22:12:32.292163 335425 kubeadm.go:211] ignoring SystemVerification for kubeadm because of docker driver
I0724 22:12:32.292216 335425 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0724 22:12:32.301240 335425 kubeadm.go:147] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0724 22:12:32.301270 335425 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0724 22:13:00.815620 335425 ssh_runner.go:188] Completed: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": (28.514325675s)
I0724 22:13:00.815666 335425 cni.go:74] Creating CNI manager for ""
I0724 22:13:00.815675 335425 cni.go:105] "docker" driver + containerd runtime found, recommending kindnet
I0724 22:13:00.844478 335425 ssh_runner.go:148] Run: stat /opt/cni/bin/portmap
I0724 22:13:00.849741 335425 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.18.3/kubectl ...
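The kubeadm init above runs with a long --ignore-preflight-errors list, and the earlier kubeadm.go:211 line explains one entry: with the docker driver the "node" is a container, so the kernel-level SystemVerification check would inspect the host's kernel and is skipped. A simplified sketch of assembling such an invocation; buildKubeadmInit and the trimmed ignore list are assumptions for illustration, not minikube's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// buildKubeadmInit assembles a kubeadm init command line like the one in
// the log. The base ignores cover files and directories minikube stages
// itself before kubeadm runs; container-based drivers additionally ignore
// SystemVerification, since the kernel being verified belongs to the host,
// not the node. Simplified sketch, not minikube's real code.
func buildKubeadmInit(version, driver string) string {
	ignores := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"DirAvailable--var-lib-minikube-etcd",
		"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
		"Port-10250",
		"Swap",
	}
	if driver == "docker" || driver == "podman" {
		ignores = append(ignores, "SystemVerification")
	}
	return fmt.Sprintf(
		"sudo env PATH=/var/lib/minikube/binaries/%s:$PATH kubeadm init"+
			" --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=%s",
		version, strings.Join(ignores, ","))
}

func main() {
	fmt.Println(buildKubeadmInit("v1.18.3", "docker"))
}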
I0724 22:13:00.849760 335425 ssh_runner.go:215] scp memory --> /var/tmp/minikube/cni.yaml (2285 bytes)
I0724 22:13:00.880032 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0724 22:13:02.852275 335425 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.972196162s)
I0724 22:13:02.852413 335425 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0724 22:13:02.852565 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:02.852649 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl label nodes minikube.k8s.io/version=v1.12.1 minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf minikube.k8s.io/name=containerd-20200724221200-14997 minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:03.039129 335425 ops.go:35] apiserver oom_adj: -16
I0724 22:13:03.039192 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:03.646327 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:04.146273 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:04.646348 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:05.146218 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:05.646281 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:06.146262 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:06.646264 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:07.146258 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:07.646460 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:08.146362 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:08.646249 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:09.146287 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:09.646270 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:10.146256 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:10.646291 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:11.146294 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:15.477809 335425 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig: (4.331461858s)
I0724 22:13:15.646360 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:16.146229 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:16.646262 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:17.146313 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:17.646261 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:18.146343 335425 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:13:18.344148 335425 kubeadm.go:866] duration metric: took 15.491636889s to wait for elevateKubeSystemPrivileges.
I0724 22:13:18.344185 335425 kubeadm.go:329] StartCluster complete in 46.085856312s
I0724 22:13:18.344214 335425 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0724 22:13:18.344392 335425 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
I0724 22:13:18.347016 335425 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0724 22:13:18.347239 335425 start.go:195] Will wait wait-timeout for node ...
I0724 22:13:18.347278 335425 addons.go:353] enableAddons start: toEnable=map[], additional=[]
I0724 22:13:18.359031 335425 addons.go:53] Setting storage-provisioner=true in profile "containerd-20200724221200-14997"
I0724 22:13:18.359057 335425 addons.go:129] Setting addon storage-provisioner=true in "containerd-20200724221200-14997"
I0724 22:13:18.347386 335425 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl scale deployment --replicas=1 coredns -n=kube-system
I0724 22:13:18.359075 335425 addons.go:53] Setting default-storageclass=true in profile "containerd-20200724221200-14997"
I0724 22:13:18.359100 335425 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "containerd-20200724221200-14997"
W0724 22:13:18.359068 335425 addons.go:138] addon storage-provisioner should already be in state true
I0724 22:13:18.359206 335425 host.go:65] Checking if "containerd-20200724221200-14997" exists ...
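The run of identical `kubectl get sa default` lines above, spaced 500ms apart, is a poll: the minikube-rbac clusterrolebinding can only be created once the controller manager has populated the default ServiceAccount, which here took about 15.5s (the elevateKubeSystemPrivileges duration). A minimal sketch of that poll shape; waitForDefaultSA is a hypothetical name, and the real code runs kubectl inside the node over SSH rather than locally:

package main

import (
	"errors"
	"fmt"
	"os/exec"
	"time"
)

// waitForDefaultSA re-runs `kubectl get sa default` every 500ms until it
// succeeds or the deadline passes, the same cadence visible in the log.
// Sketch only, under the assumptions stated above.
func waitForDefaultSA(kubectl, kubeconfig string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		cmd := exec.Command("sudo", kubectl, "get", "sa", "default",
			"--kubeconfig="+kubeconfig)
		if err := cmd.Run(); err == nil {
			return nil // ServiceAccount exists; RBAC bootstrap can proceed
		}
		time.Sleep(500 * time.Millisecond)
	}
	return errors.New("timed out waiting for default service account")
}

func main() {
	err := waitForDefaultSA("/var/lib/minikube/binaries/v1.18.3/kubectl",
		"/var/lib/minikube/kubeconfig", 2*time.Minute)
	fmt.Println(err)
}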
I0724 22:13:18.359545 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}} I0724 22:13:18.359940 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}} I0724 22:13:18.362988 335425 api_server.go:48] waiting for apiserver process to appear ... I0724 22:13:18.363040 335425 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.* I0724 22:13:18.431773 335425 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:13:18.431795 335425 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes) I0724 22:13:18.431861 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:13:18.447485 335425 addons.go:129] Setting addon default-storageclass=true in "containerd-20200724221200-14997" W0724 22:13:18.447513 335425 addons.go:138] addon default-storageclass should already be in state true I0724 22:13:18.447528 335425 host.go:65] Checking if "containerd-20200724221200-14997" exists ... I0724 22:13:18.448466 335425 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}} I0724 22:13:18.501378 335425 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32900 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:13:18.509273 335425 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml I0724 22:13:18.509304 335425 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) I0724 22:13:18.509382 335425 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:13:18.559160 335425 start.go:549] successfully scaled coredns replicas to 1 I0724 22:13:18.559233 335425 api_server.go:68] duration metric: took 211.960583ms to wait for apiserver process to appear ... I0724 22:13:18.559262 335425 api_server.go:84] waiting for apiserver healthz status ... I0724 22:13:18.559271 335425 api_server.go:221] Checking apiserver healthz at https://172.17.0.5:8444/healthz ... I0724 22:13:18.571632 335425 api_server.go:241] https://172.17.0.5:8444/healthz returned 200: ok I0724 22:13:18.572660 335425 api_server.go:137] control plane version: v1.18.3 I0724 22:13:18.572684 335425 api_server.go:127] duration metric: took 13.41626ms to wait for apiserver health ... I0724 22:13:18.572694 335425 system_pods.go:43] waiting for kube-system pods to appear ... I0724 22:13:18.575485 335425 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32900 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:13:18.646490 335425 system_pods.go:59] 8 kube-system pods found I0724 22:13:18.646532 335425 system_pods.go:61] "coredns-66bff467f8-cgqcr" [dd2b00b1-a5a7-4656-8d40-5bfedf60d013] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) 
I0724 22:13:18.646539 335425 system_pods.go:61] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) I0724 22:13:18.646547 335425 system_pods.go:61] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:18.646556 335425 system_pods.go:61] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:18.646562 335425 system_pods.go:61] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:18.646567 335425 system_pods.go:61] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:18.646574 335425 system_pods.go:61] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:13:18.646579 335425 system_pods.go:61] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:18.646586 335425 system_pods.go:74] duration metric: took 73.884435ms to wait for pod list to return data ... I0724 22:13:18.646599 335425 default_sa.go:33] waiting for default service account to be created ... I0724 22:13:18.668031 335425 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:13:18.671752 335425 default_sa.go:44] found service account: "default" I0724 22:13:18.671777 335425 default_sa.go:54] duration metric: took 25.171313ms for default service account to be created ... I0724 22:13:18.671788 335425 system_pods.go:116] waiting for k8s-apps to be running ... I0724 22:13:18.755280 335425 system_pods.go:86] 7 kube-system pods found I0724 22:13:18.755326 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) 
I0724 22:13:18.755341 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:18.755356 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:18.755378 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:18.755389 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:18.755400 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:13:18.755407 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:18.755429 335425 retry.go:30] will retry after 263.082536ms: missing components: kube-dns, kube-proxy I0724 22:13:18.763281 335425 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml I0724 22:13:19.023160 335425 system_pods.go:86] 7 kube-system pods found I0724 22:13:19.023192 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) I0724 22:13:19.023199 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:19.023208 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:19.023214 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:19.023220 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:19.023227 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:13:19.023232 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:19.023244 335425 retry.go:30] will retry after 381.329545ms: missing components: kube-dns, kube-proxy I0724 22:13:19.295860 335425 addons.go:355] enableAddons completed in 948.586287ms I0724 22:13:19.419737 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:19.419786 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) 
I0724 22:13:19.419801 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:19.419816 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:19.419826 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:19.419838 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:19.419888 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:13:19.419912 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:19.419924 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:19.419948 335425 retry.go:30] will retry after 422.765636ms: missing components: kube-dns, kube-proxy I0724 22:13:19.849306 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:19.849356 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) 
I0724 22:13:19.849367 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:19.849384 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:19.849414 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:19.849425 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:19.849437 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:13:19.849457 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:19.849468 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:19.849482 335425 retry.go:30] will retry after 473.074753ms: missing components: kube-dns, kube-proxy I0724 22:13:20.327936 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:20.327970 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) 
I0724 22:13:20.327977 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:20.327988 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:20.327995 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:20.328005 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:20.328013 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:20.328021 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:20.328028 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:20.328046 335425 retry.go:30] will retry after 587.352751ms: missing components: kube-dns I0724 22:13:20.920890 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:20.920921 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:20.920928 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:20.920936 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:20.920941 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:20.920946 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:20.920951 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:20.920957 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:20.920963 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:20.920974 335425 retry.go:30] will retry after 834.206799ms: missing components: kube-dns I0724 22:13:21.760980 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:21.761015 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:21.761025 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:21.761034 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:21.761043 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:21.761052 335425 system_pods.go:89] 
"kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:21.761061 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:21.761071 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:21.761088 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:21.761109 335425 retry.go:30] will retry after 746.553905ms: missing components: kube-dns I0724 22:13:22.514360 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:22.514407 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:22.514418 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:22.514428 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:22.514437 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:22.514446 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:22.514454 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:22.514464 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:22.514483 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:22.514497 335425 retry.go:30] will retry after 987.362415ms: missing components: kube-dns I0724 22:13:23.507217 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:23.507250 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:23.507256 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:23.507263 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:23.507269 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:23.507275 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:23.507280 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:23.507285 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:23.507325 335425 system_pods.go:89] "storage-provisioner" 
[25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:23.507343 335425 retry.go:30] will retry after 1.189835008s: missing components: kube-dns I0724 22:13:24.704184 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:24.704242 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:24.704255 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:24.704272 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:24.704285 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:24.704299 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:24.704307 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:24.704315 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:24.704430 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:24.704447 335425 retry.go:30] will retry after 1.677229867s: missing components: kube-dns I0724 22:13:26.388471 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:26.388517 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:26.388528 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:26.388540 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:26.388550 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:26.388559 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:26.388567 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:26.388587 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:26.388598 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:26.388621 335425 retry.go:30] will retry after 2.346016261s: missing components: kube-dns I0724 22:13:28.740912 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:28.740953 335425 system_pods.go:89] 
"coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:28.740965 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:28.740974 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:28.740981 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:28.740996 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:28.741003 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:28.741009 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:28.741025 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:28.741042 335425 retry.go:30] will retry after 3.36678925s: missing components: kube-dns I0724 22:13:32.119860 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:32.119895 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:32.119902 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:32.119913 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:32.119922 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:32.119929 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:32.119934 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:32.119944 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:32.119961 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:32.119974 335425 retry.go:30] will retry after 3.11822781s: missing components: kube-dns I0724 22:13:35.244073 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:35.244117 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:35.244128 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" 
[8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:35.244138 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:35.244147 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:35.244157 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:35.244199 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:35.244209 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:35.244229 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:35.244244 335425 retry.go:30] will retry after 4.276119362s: missing components: kube-dns I0724 22:13:39.525690 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:39.525727 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:39.525734 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:39.525742 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:39.525748 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:39.525754 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:39.525759 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:39.525765 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:39.525770 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running I0724 22:13:39.525779 335425 retry.go:30] will retry after 5.167232101s: missing components: kube-dns I0724 22:13:44.698103 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:44.698137 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:44.698145 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:44.698152 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:44.698157 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:44.698163 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:44.698168 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:44.698173 335425 system_pods.go:89] 
"kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:44.698179 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running I0724 22:13:44.698189 335425 retry.go:30] will retry after 6.994901864s: missing components: kube-dns I0724 22:13:51.698513 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:51.698552 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:51.698562 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:51.698571 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:51.698577 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:51.698582 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:51.698587 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:51.698592 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:51.698597 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running I0724 22:13:51.698607 335425 retry.go:30] will retry after 7.91826225s: missing components: kube-dns I0724 22:13:59.623185 335425 system_pods.go:86] 8 kube-system pods found I0724 22:13:59.623237 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:59.623247 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:13:59.623270 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:13:59.623280 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:13:59.623302 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:13:59.623311 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:13:59.623326 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:13:59.623334 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running I0724 22:13:59.623348 335425 retry.go:30] will retry after 9.953714808s: missing components: kube-dns I0724 22:14:09.582782 335425 system_pods.go:86] 8 kube-system pods found I0724 22:14:09.582816 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:14:09.582823 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:14:09.582829 
335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:14:09.582836 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:14:09.582841 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:14:09.582847 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:14:09.582852 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:14:09.582860 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:14:09.582877 335425 retry.go:30] will retry after 15.120437328s: missing components: kube-dns I0724 22:14:24.708340 335425 system_pods.go:86] 8 kube-system pods found I0724 22:14:24.708430 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:14:24.708444 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:14:24.708454 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:14:24.708465 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:14:24.708472 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:14:24.708482 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:14:24.708487 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:14:24.708494 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:14:24.708510 335425 retry.go:30] will retry after 14.90607158s: missing components: kube-dns I0724 22:14:39.620816 335425 system_pods.go:86] 8 kube-system pods found I0724 22:14:39.620850 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:14:39.620857 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:14:39.620865 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:14:39.620871 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:14:39.620877 335425 system_pods.go:89] 
"kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:14:39.620885 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:14:39.620893 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:14:39.620904 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:14:39.620926 335425 retry.go:30] will retry after 18.465989061s: missing components: kube-dns I0724 22:14:58.091965 335425 system_pods.go:86] 8 kube-system pods found I0724 22:14:58.091997 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:14:58.092004 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:14:58.092011 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:14:58.092017 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:14:58.092023 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:14:58.092028 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:14:58.092034 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:14:58.092041 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:14:58.092051 335425 retry.go:30] will retry after 25.219510332s: missing components: kube-dns I0724 22:15:23.318465 335425 system_pods.go:86] 8 kube-system pods found I0724 22:15:23.318500 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:15:23.318507 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:15:23.318517 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:15:23.318524 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:15:23.318532 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:15:23.318537 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:15:23.318542 335425 system_pods.go:89] 
"kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:15:23.318549 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:15:23.318558 335425 retry.go:30] will retry after 35.078569648s: missing components: kube-dns I0724 22:15:58.402126 335425 system_pods.go:86] 8 kube-system pods found I0724 22:15:58.402161 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:15:58.402168 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:15:58.402177 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:15:58.402184 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:15:58.402190 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:15:58.402195 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:15:58.402202 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:15:58.402209 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:15:58.402219 335425 retry.go:30] will retry after 50.027701973s: missing components: kube-dns I0724 22:16:48.435679 335425 system_pods.go:86] 8 kube-system pods found I0724 22:16:48.435716 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:16:48.435723 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:16:48.435730 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:16:48.435736 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:16:48.435742 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:16:48.435749 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:16:48.435755 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:16:48.435760 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running I0724 22:16:48.435770 335425 retry.go:30] will retry after 47.463338706s: missing components: 
kube-dns I0724 22:17:35.905316 335425 system_pods.go:86] 8 kube-system pods found I0724 22:17:35.905366 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:17:35.905375 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:17:35.905385 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:17:35.905392 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:17:35.905400 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:17:35.905406 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:17:35.905411 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:17:35.905419 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:17:35.905439 335425 retry.go:30] will retry after 53.912476906s: missing components: kube-dns I0724 22:18:29.823122 335425 system_pods.go:86] 8 kube-system pods found I0724 22:18:29.823159 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:18:29.823165 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:18:29.823173 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:18:29.823179 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:18:29.823184 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:18:29.823189 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:18:29.823195 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:18:29.823210 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:18:29.823223 335425 retry.go:30] will retry after 1m7.577191067s: missing components: kube-dns I0724 22:19:37.408624 335425 system_pods.go:86] 8 kube-system pods found I0724 22:19:37.408670 335425 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / 
ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:19:37.408680 335425 system_pods.go:89] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running I0724 22:19:37.408693 335425 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:19:37.408709 335425 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running I0724 22:19:37.408719 335425 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:19:37.408733 335425 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:19:37.408741 335425 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:19:37.408757 335425 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:19:37.408877 335425 exit.go:58] WithError(failed to start node)=startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from: goroutine 1 [running]: runtime/debug.Stack(0x0, 0x0, 0x100000000000000) /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc00090f1e0) /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34 k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc0003f6620, 0x2, 0xe) /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505 github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc0003f6540, 0xe, 0xe, 0x2cd0820, 0xc0003f6540) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc000a1a5f0) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349 github.com/spf13/cobra.(*Command).Execute(...) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887 k8s.io/minikube/cmd/minikube/cmd.Execute() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c main.main() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f W0724 22:19:37.409094 335425 out.go:249] failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * X failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * * minikube is exiting due to an error. If the above message is not useful, open an issue: - https://github.com/kubernetes/minikube/issues/new/choose ** /stderr ** start_stop_delete_test.go:151: failed starting minikube -first start-. 
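Note on the failure above: minikube gave up after its 6m0s apps_running wait because the coredns pod (kube-dns) never reported Ready; each retry.go line is one poll of the kube-system pods, with a growing, uneven delay between polls. Below is a minimal Go sketch of that poll-with-backoff pattern — illustrative only, not minikube's actual retry.go; the function name retryUntil, the initial delay, the growth factor, and the jitter are all assumptions, and the 30s timeout stands in for the real 6m0s. The failing invocation and its post-mortem follow.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retryUntil polls check with growing, jittered delays until it succeeds
// or the overall timeout elapses, mirroring the uneven "will retry after"
// intervals in the log above.
func retryUntil(timeout time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	delay := 5 * time.Second // assumed starting delay
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("wait %v for node: %w", timeout, err)
		}
		// Add jitter so repeated polls don't land on a fixed cadence.
		wait := delay + time.Duration(rand.Int63n(int64(delay)))
		fmt.Printf("will retry after %v: %v\n", wait, err)
		time.Sleep(wait)
		delay = delay * 3 / 2 // assumed growth factor
	}
}

func main() {
	// Stand-in for the real system-pods check that kept failing above.
	err := retryUntil(30*time.Second, func() error {
		return errors.New("missing components: kube-dns")
	})
	fmt.Println(err) // wait 30s for node: missing components: kube-dns
}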
args "./minikube-linux-amd64 start -p containerd-20200724221200-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --apiserver-port=8444 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3": exit status 70 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/FirstStart]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 338511, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:12:10.40831313Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, 
"MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "3e3fc4759cbd4070ed5dddc0264a24f411b397aab6336237e6416cbda2769e84", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32900" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32899" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32898" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32897" } ] }, "SandboxKey": "/var/run/docker/netns/3e3fc4759cbd", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "1a1c80297b8f177f80b8e0fe79832f4492796767c95941e75116c272b2743f88", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:05", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "1a1c80297b8f177f80b8e0fe79832f4492796767c95941e75116c272b2743f88", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:05", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/FirstStart FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/FirstStart]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/FirstStart logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 36500c287a12c 4689081edb103 5 seconds ago Exited storage-provisioner 6 92684e9147786 * 14578a42043db 2186a1a396deb About a minute ago Exited kindnet-cni 5 980558efef9f1 * 003518d48f7b6 3439b7546f29b 6 minutes ago Running kube-proxy 0 ca6897a07eae7 * 6f125fe745aef da26705ccb4b5 6 minutes ago Running kube-controller-manager 1 e257c0c495dd3 * 3b03abf8faaef 7e28efa976bd1 6 minutes ago Running kube-apiserver 0 c2da5c12ff9ea * 6dc3461120ba7 da26705ccb4b5 6 minutes ago Exited kube-controller-manager 0 e257c0c495dd3 * bff121f454668 76216c34ed0c7 6 minutes ago Running kube-scheduler 0 4c3ff6ff63ddc * db0fd28c536d4 303ce5db0e90d 6 minutes ago Running etcd 0 0de406b420c86 * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:12:25 UTC, end at Fri 2020-07-24 22:19:38 UTC. 
--
* Jul 24 22:18:32 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:32.943428955Z" level=info msg="TaskExit event &TaskExit{ContainerID:14578a42043dbf86e3162040dc392c661e36b9fceb1473b8efe551a4ef3cc2ce,ID:14578a42043dbf86e3162040dc392c661e36b9fceb1473b8efe551a4ef3cc2ce,Pid:4086,ExitStatus:2,ExitedAt:2020-07-24 22:18:32.943181538 +0000 UTC,XXX_unrecognized:[],}"
* Jul 24 22:18:32 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:32.978876131Z" level=info msg="shim reaped" id=14578a42043dbf86e3162040dc392c661e36b9fceb1473b8efe551a4ef3cc2ce
* Jul 24 22:18:33 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:33.514801465Z" level=info msg="RemoveContainer for \"b8e1ccf6be24c7d99e89284e54678b168de91929e102b8c99be4916f54a29087\""
* Jul 24 22:18:33 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:33.549179466Z" level=info msg="RemoveContainer for \"b8e1ccf6be24c7d99e89284e54678b168de91929e102b8c99be4916f54a29087\" returns successfully"
* Jul 24 22:18:43 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:43.957543563Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:18:45 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:45.953574876Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"55fcfcc47fb6ba611000f00f6d8af3f4d0c963783bc3cb67e64d34bfade6f762\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:18:56 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:56.957657880Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:18:58 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:18:58.937848728Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"3930c8492e1cdc76f5686653b6737cca9f6cc78412165ca1eac8b4f70e56b6e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:19:09 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:09.957557879Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:19:11 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:11.769066113Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"f79959429abee3a1710f30e8cde2d406482c77d9cf5e2d8d1ab01c25695dfa89\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:19:23 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:23.957677490Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:19:25 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:25.758814221Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"b51e6594da27b8be9db76fc002801d53e59cdd7b399b5ce21f7319efffa7004a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:19:32 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:32.959185159Z" level=info msg="CreateContainer within sandbox \"92684e91477862737c3da16abc6ba57adfebcd6b329e7fd233dbcdedaffb2c74\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:6,}"
* Jul 24 22:19:33 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:33.033360434Z" level=info msg="CreateContainer within sandbox \"92684e91477862737c3da16abc6ba57adfebcd6b329e7fd233dbcdedaffb2c74\" for &ContainerMetadata{Name:storage-provisioner,Attempt:6,} returns container id \"36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4\""
* Jul 24 22:19:33 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:33.033730460Z" level=info msg="StartContainer for \"36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4\""
* Jul 24 22:19:33 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:33.034548617Z" level=info msg="shim containerd-shim started" address=/containerd-shim/0956aa9427e04ed85df95aa0060c11223d015773d7dae8a53412649df90c452a.sock debug=false pid=4408
* Jul 24 22:19:33 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:33.137508400Z" level=info msg="StartContainer for \"36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4\" returns successfully"
* Jul 24 22:19:35 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:35.957511989Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:19:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:36.370484059Z" level=info msg="Finish piping stderr of container \"36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4\""
* Jul 24 22:19:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:36.370490059Z" level=info msg="Finish piping stdout of container \"36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4\""
* Jul 24 22:19:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:36.371846154Z" level=info msg="TaskExit event &TaskExit{ContainerID:36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4,ID:36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4,Pid:4436,ExitStatus:1,ExitedAt:2020-07-24 22:19:36.371614838 +0000 UTC,XXX_unrecognized:[],}"
* Jul 24 22:19:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:36.401912349Z" level=info msg="shim reaped" id=36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4
* Jul 24 22:19:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:36.592375018Z" level=info msg="RemoveContainer for \"394f89b0cefae66bc4dd009358f5b26854c1c1d309e47581a77b165c08fd360b\""
* Jul 24 22:19:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:36.623048654Z" level=info msg="RemoveContainer for \"394f89b0cefae66bc4dd009358f5b26854c1c1d309e47581a77b165c08fd360b\"
returns successfully" * Jul 24 22:19:38 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:19:38.000656128Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"dc02af12a85bc425677af025657ca743bc31a3b93ade5f169a4e544c4f14c19d\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:19:31 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:18:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:18:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:18:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:18:15 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.5 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: a912f43aab5e4ba59b29e84664ffc131 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (8 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 6m20s * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m37s * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 6m20s * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 6m37s * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 6m41s * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 
(0%) 0 (0%) 6m20s * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 6m37s * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m19s * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 7m1s (x6 over 7m2s) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 7m1s (x6 over 7m2s) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 7m1s (x5 over 7m2s) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal Starting 6m38s kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 6m38s kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 6m38s kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 6m37s kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 6m37s kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 6m37s kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 6m37s kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 6m27s kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Warning readOnlySysFS 6m19s kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 6m19s kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
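Note on the node description above: it corroborates the timeout. The only non-terminated kube-system pod without a running container is coredns-66bff467f8-hlk9j, and the containerd log shows every CreatePodSandbox attempt for it failing with `failed to set bridge addr: could not add IP address to "cni0": permission denied`, so the pod stays Pending, kube-dns never registers, and kindnet/storage-provisioner crash-loop alongside. For triage of this kind of state, a short client-go sketch that lists kube-system pods whose Ready condition is not True is shown below — a hypothetical helper, not part of this test suite; it assumes a reachable kubeconfig at $HOME/.kube/config and k8s.io/client-go on the module path.

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes the profile's kubeconfig was merged into the default location.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	pods, err := client.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pod := range pods.Items {
		for _, cond := range pod.Status.Conditions {
			// Report pods whose Ready condition is not True, e.g. the
			// Pending coredns pod in the log above.
			if cond.Type == corev1.PodReady && cond.Status != corev1.ConditionTrue {
				fmt.Printf("%s: phase=%s reason=%s message=%s\n",
					pod.Name, pod.Status.Phase, cond.Reason, cond.Message)
			}
		}
	}
}

This is roughly what the `kubectl --context ... get po --field-selector=status.phase!=Running` post-mortem step later in this log does from the CLI.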
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [db0fd28c536d4d051abb0c3b7d4219ed9ad43fae8b4e05f52f322ed31bd12c02] <== * 2020-07-24 22:12:56.626530 W | etcdserver: read-only range request "key:\"/registry/resourcequotas/kube-system/\" range_end:\"/registry/resourcequotas/kube-system0\" " with result "range_response_count:0 size:4" took too long (232.29517ms) to execute * 2020-07-24 22:12:56.626632 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/system-cluster-critical\" " with result "range_response_count:0 size:4" took too long (233.165919ms) to execute * 2020-07-24 22:12:56.778191 W | etcdserver: read-only range request "key:\"/registry/clusterroles/system:aggregate-to-admin\" " with result "range_response_count:0 size:4" took too long (172.974981ms) to execute * 2020-07-24 22:12:56.812483 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:4" took too long (183.62819ms) to execute * 2020-07-24 22:12:56.812513 W | etcdserver: read-only range request "key:\"/registry/ranges/serviceips\" " with result "range_response_count:1 size:114" took too long (184.546442ms) to execute * 2020-07-24 22:12:56.812550 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:4" took too long (135.884062ms) to execute * 2020-07-24 22:12:56.812562 W | etcdserver: read-only range request "key:\"/registry/ranges/servicenodeports\" " with result "range_response_count:1 size:119" took too long (184.712051ms) to execute * 2020-07-24 22:12:56.812574 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:0 size:4" took too long (155.514884ms) to execute * 2020-07-24 22:12:56.812662 W | etcdserver: read-only range request 
"key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:262" took too long (183.360374ms) to execute * 2020-07-24 22:12:57.044210 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:4" took too long (129.896221ms) to execute * 2020-07-24 22:12:57.044242 W | etcdserver: read-only range request "key:\"/registry/clusterroles/system:aggregate-to-view\" " with result "range_response_count:0 size:4" took too long (228.539655ms) to execute * 2020-07-24 22:12:57.044266 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:0 size:4" took too long (178.276384ms) to execute * 2020-07-24 22:12:57.044278 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-node-lease\" " with result "range_response_count:0 size:4" took too long (227.465694ms) to execute * 2020-07-24 22:13:15.434077 W | wal: sync duration of 3.834899995s, expected less than 1s * 2020-07-24 22:13:15.471535 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:5503" took too long (4.51397778s) to execute * 2020-07-24 22:13:15.471572 W | etcdserver: request "header: txn: success:> failure:<>>" with result "size:16" took too long (3.872288817s) to execute * 2020-07-24 22:13:15.471926 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/default/default\" " with result "range_response_count:0 size:5" took too long (4.245793609s) to execute * 2020-07-24 22:13:15.471957 W | etcdserver: read-only range request "key:\"/registry/minions/\" range_end:\"/registry/minions0\" " with result "range_response_count:1 size:5367" took too long (2.908773208s) to execute * 2020-07-24 22:13:30.080019 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (518.961173ms) to execute * 2020-07-24 22:13:31.304192 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (430.352557ms) to execute * 2020-07-24 22:13:31.304379 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (347.781631ms) to execute * 2020-07-24 22:14:39.084771 W | wal: sync duration of 1.885899215s, expected less than 1s * 2020-07-24 22:14:39.085392 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (752.73234ms) to execute * 2020-07-24 22:14:39.204221 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (526.93962ms) to execute * 2020-07-24 22:14:39.204593 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (314.821685ms) to execute * * ==> kernel <== * 22:19:38 up 46 min, 0 users, load average: 6.02, 9.14, 8.55 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [3b03abf8faaefbc437950aa59178dcca778ce856951332f14e9c159b83a9cd10] <== * I0724 22:12:59.714156 1 controller.go:606] quota admission added evaluator for: serviceaccounts * 
I0724 22:13:00.637968 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 22:13:00.714747 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:13:15.472257 1 trace.go:116] Trace[371264249]: "Create" url:/api/v1/namespaces/default/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:11.165316402 +0000 UTC m=+28.885265456) (total time: 4.306895167s): * Trace[371264249]: [4.306830363s] [4.306748958s] Object stored in database * I0724 22:13:15.472429 1 trace.go:116] Trace[1157467879]: "Get" url:/api/v1/namespaces/default/serviceaccounts/default,user-agent:kubectl/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:13:11.225715914 +0000 UTC m=+28.945664968) (total time: 4.246681467s): * Trace[1157467879]: [4.246681467s] [4.246669166s] END * I0724 22:13:15.472453 1 trace.go:116] Trace[1376546597]: "GuaranteedUpdate etcd3" type:*core.Node (started: 2020-07-24 22:13:11.166090152 +0000 UTC m=+28.886039306) (total time: 4.306337531s): * Trace[1376546597]: [4.306206523s] [4.304357703s] Transaction committed * I0724 22:13:15.472507 1 trace.go:116] Trace[1373390526]: "List etcd3" key:/minions,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:13:12.562772918 +0000 UTC m=+30.282722072) (total time: 2.909710469s): * Trace[1373390526]: [2.909710469s] [2.909710469s] END * I0724 22:13:15.472573 1 trace.go:116] Trace[209798184]: "Get" url:/api/v1/namespaces/kube-system/pods/kube-apiserver-containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:10.957265426 +0000 UTC m=+28.677214580) (total time: 4.515285465s): * Trace[209798184]: [4.51489514s] [4.514888939s] About to write a response * I0724 22:13:15.472707 1 trace.go:116] Trace[484532927]: "Patch" url:/api/v1/nodes/containerd-20200724221200-14997/status,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:11.165983745 +0000 UTC m=+28.885932899) (total time: 4.306690554s): * Trace[484532927]: [4.306486741s] [4.304883637s] Object stored in database * I0724 22:13:15.472774 1 trace.go:116] Trace[667541080]: "List" url:/api/v1/nodes,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:node-controller,client:172.17.0.5 (started: 2020-07-24 22:13:12.562758918 +0000 UTC m=+30.282707972) (total time: 2.909990386s): * Trace[667541080]: [2.909773072s] [2.909764572s] Listing from storage done * I0724 22:13:18.078919 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:13:18.426181 1 controller.go:606] quota admission added evaluator for: controllerrevisions.apps * I0724 22:13:30.080930 1 trace.go:116] Trace[926841721]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:13:28.891002839 +0000 UTC m=+46.610951993) (total time: 1.189883246s): * Trace[926841721]: [1.189853344s] [1.188435753s] Transaction committed * I0724 22:14:39.085727 1 trace.go:116] Trace[943610803]: "GuaranteedUpdate etcd3" type:*apps.DaemonSet (started: 2020-07-24 22:14:37.201680483 +0000 UTC m=+114.921629637) (total time: 1.884006692s): * Trace[943610803]: [1.883921787s] [1.882993127s] Transaction committed * I0724 22:14:39.086045 1 trace.go:116] Trace[431771147]: "Update" url:/apis/apps/v1/namespaces/kube-system/daemonsets/kindnet/status,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) 
kubernetes/2e7996e/system:serviceaccount:kube-system:daemon-set-controller,client:172.17.0.5 (started: 2020-07-24 22:14:37.201520273 +0000 UTC m=+114.921469327) (total time: 1.884488723s): * Trace[431771147]: [1.884264808s] [1.884157301s] Object stored in database * * ==> kube-controller-manager [6dc3461120ba7717ddda6d5e83f663ff1cb958b21f4b6b496ed88923606aebf1] <== * I0724 22:12:43.365003 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:12:44.072514 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:12:44.073870 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:12:44.073872 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:12:44.074524 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:12:44.074632 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:12:44.075551 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * F0724 22:12:57.450686 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: forbidden: User "system:kube-controller-manager" cannot get path "/healthz" * * ==> kube-controller-manager [6f125fe745aef65ddf16605622b6130fd0ebc50c6ee73b0d5b8431248dc821c2] <== * I0724 22:13:18.083396 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"c339b220-56ba-47ef-93ef-bb5429df2c46", APIVersion:"apps/v1", ResourceVersion:"208", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-66bff467f8 to 2 * I0724 22:13:18.088573 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-cgqcr * I0724 22:13:18.143495 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-hlk9j * I0724 22:13:18.177452 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:13:18.240929 1 shared_informer.go:230] Caches are synced for endpoint * I0724 22:13:18.273694 1 shared_informer.go:230] Caches are synced for expand * I0724 22:13:18.275914 1 shared_informer.go:230] Caches are synced for PVC protection * I0724 22:13:18.325760 1 shared_informer.go:230] Caches are synced for stateful set * I0724 22:13:18.421570 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:13:18.446520 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"a0159c58-7dba-43c7-a87d-d2e4c392a926", APIVersion:"apps/v1", ResourceVersion:"214", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-x7fwq * I0724 22:13:18.452732 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kindnet", UID:"6b1c3e80-93ca-41d3-9d48-43047d4d93a6", APIVersion:"apps/v1", ResourceVersion:"261", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-nsc8k * I0724 22:13:18.463040 1 shared_informer.go:230] Caches are synced for 
taint * I0724 22:13:18.463114 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:13:18.463170 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * W0724 22:13:18.463272 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:13:18.463319 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:13:18.463344 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * I0724 22:13:18.564886 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"c339b220-56ba-47ef-93ef-bb5429df2c46", APIVersion:"apps/v1", ResourceVersion:"372", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1 * I0724 22:13:18.613309 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:13:18.635502 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:13:18.635535 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:13:18.639165 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"373", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-cgqcr * I0724 22:13:18.653092 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:13:18.676387 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:13:18.677047 1 shared_informer.go:230] Caches are synced for garbage collector * * ==> kube-proxy [003518d48f7b64998ac9b35be592bc8821d93926af4451deb91a09ab8b6e907d] <== * W0724 22:13:19.661270 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:13:19.702749 1 node.go:136] Successfully retrieved node IP: 172.17.0.5 * I0724 22:13:19.702805 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [bff121f454668ac500a195fcf425cf4c1545c2b5466ceef795a4d2c11d3c6d76] <== * E0724 22:12:48.856731 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:48.943428 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:48.947394 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:12:49.021495 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:49.042794 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:49.115776 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:50.494405 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:50.855911 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:50.917390 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User 
"system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:12:51.267562 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:51.297256 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:12:51.371164 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:51.686787 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:12:51.816022 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:12:52.019881 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:54.420521 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:55.430747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:56.236464 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:56.459747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:12:56.965178 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:12:57.125721 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:12:57.141479 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace 
"kube-system" * E0724 22:12:57.352492 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:58.018671 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * I0724 22:13:03.848890 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:12:25 UTC, end at Fri 2020-07-24 22:19:38 UTC. -- * Jul 24 22:19:01 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:01.957540 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:19:07 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:07.957224 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 394f89b0cefae66bc4dd009358f5b26854c1c1d309e47581a77b165c08fd360b * Jul 24 22:19:07 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:07.957516 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:19:11 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:11.769304 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "f79959429abee3a1710f30e8cde2d406482c77d9cf5e2d8d1ab01c25695dfa89": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:11 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:11.769363 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "f79959429abee3a1710f30e8cde2d406482c77d9cf5e2d8d1ab01c25695dfa89": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:11 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:11.769381 1328 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "f79959429abee3a1710f30e8cde2d406482c77d9cf5e2d8d1ab01c25695dfa89": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:11 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:11.769433 1328 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for 
"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"f79959429abee3a1710f30e8cde2d406482c77d9cf5e2d8d1ab01c25695dfa89\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:19:14 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:14.957570 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 14578a42043dbf86e3162040dc392c661e36b9fceb1473b8efe551a4ef3cc2ce * Jul 24 22:19:14 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:14.958122 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:19:19 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:19.957150 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 394f89b0cefae66bc4dd009358f5b26854c1c1d309e47581a77b165c08fd360b * Jul 24 22:19:19 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:19.957412 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 2m40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:19:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:25.759066 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "b51e6594da27b8be9db76fc002801d53e59cdd7b399b5ce21f7319efffa7004a": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:25.759126 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "b51e6594da27b8be9db76fc002801d53e59cdd7b399b5ce21f7319efffa7004a": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:25.759142 1328 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "b51e6594da27b8be9db76fc002801d53e59cdd7b399b5ce21f7319efffa7004a": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:25.759195 1328 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod 
\"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"b51e6594da27b8be9db76fc002801d53e59cdd7b399b5ce21f7319efffa7004a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:19:28 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:28.957372 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 14578a42043dbf86e3162040dc392c661e36b9fceb1473b8efe551a4ef3cc2ce * Jul 24 22:19:28 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:28.957976 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:19:32 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:32.957341 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 394f89b0cefae66bc4dd009358f5b26854c1c1d309e47581a77b165c08fd360b * Jul 24 22:19:36 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:36.591653 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 394f89b0cefae66bc4dd009358f5b26854c1c1d309e47581a77b165c08fd360b * Jul 24 22:19:36 containerd-20200724221200-14997 kubelet[1328]: I0724 22:19:36.591971 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4 * Jul 24 22:19:36 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:36.592225 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:19:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:38.000937 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "dc02af12a85bc425677af025657ca743bc31a3b93ade5f169a4e544c4f14c19d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:38.000987 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "dc02af12a85bc425677af025657ca743bc31a3b93ade5f169a4e544c4f14c19d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:38.001002 1328 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "dc02af12a85bc425677af025657ca743bc31a3b93ade5f169a4e544c4f14c19d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:19:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:19:38.001055 1328 pod_workers.go:191] Error syncing pod 
584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"dc02af12a85bc425677af025657ca743bc31a3b93ade5f169a4e544c4f14c19d\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [36500c287a12c3bd76dde8371c9c7b20c770270f4ef0f2171537776b553d6fb4] <== * F0724 22:19:36.364574 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: coredns-66bff467f8-hlk9j helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/FirstStart]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod coredns-66bff467f8-hlk9j helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod coredns-66bff467f8-hlk9j: exit status 1 (74.369481ms) ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod coredns-66bff467f8-hlk9j: exit status 1 === RUN TestStartStop/group/containerd/serial/DeployApp start_stop_delete_test.go:158: (dbg) Run: kubectl --context containerd-20200724221200-14997 create -f testdata/busybox.yaml start_stop_delete_test.go:158: (dbg) TestStartStop/group/containerd/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ... 
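The 8m0s wait above is the standard client-go polling pattern. As a rough sketch — not minikube's actual `helpers_test.go` implementation; the kubeconfig path is an assumption for illustration — the loop looks like the following. Note that the "timed out waiting for the condition" message later in this log is exactly the error `wait.PollImmediate` returns on timeout:

```go
// Minimal sketch of an "wait up to 8m0s for pods matching a label" loop.
// Illustration only; the kubeconfig path and selector mirror the log above.
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Poll every 2s, give up after 8 minutes -- the same budget the test uses.
	err = wait.PollImmediate(2*time.Second, 8*time.Minute, func() (bool, error) {
		pods, err := client.CoreV1().Pods("default").List(context.TODO(),
			metav1.ListOptions{LabelSelector: "integration-test=busybox"})
		if err != nil {
			return false, nil // treat API hiccups as transient: keep polling
		}
		for _, p := range pods.Items {
			if p.Status.Phase != "Running" {
				return false, nil // still Pending/ContainerCreating, as in the log above
			}
		}
		return len(pods.Items) > 0, nil
	})
	fmt.Println("wait result:", err) // on failure: "timed out waiting for the condition"
}
```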
helpers_test.go:332: "busybox" [4b662b5f-6e78-48de-818c-81989d7f4ea9] Pending helpers_test.go:332: "busybox" [4b662b5f-6e78-48de-818c-81989d7f4ea9] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) === CONT TestStartStop/group/crio/serial/DeployApp start_stop_delete_test.go:158: ***** TestStartStop/group/crio/serial/DeployApp: pod "integration-test=busybox" failed to start within 8m0s: timed out waiting for the condition **** start_stop_delete_test.go:158: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 start_stop_delete_test.go:158: TestStartStop/group/crio/serial/DeployApp: showing logs for failed pods as of 2020-07-24 22:25:30.404718726 +0000 UTC m=+2960.529083211 start_stop_delete_test.go:158: (dbg) Run: kubectl --context crio-20200724220901-14997 describe po busybox -n default start_stop_delete_test.go:158: (dbg) kubectl --context crio-20200724220901-14997 describe po busybox -n default: Name: busybox Namespace: default Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:17:30 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-2jsfl: Type: Secret (a volume populated by a Secret) SecretName: default-token-2jsfl Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 8m1s default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997 Warning FailedCreatePodSandBox 7m57s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m44s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m29s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m16s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set 
bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m58s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m45s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m31s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m14s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 5m58s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 102s (x17 over 5m45s) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied start_stop_delete_test.go:158: (dbg) Run: kubectl --context crio-20200724220901-14997 logs busybox -n default start_stop_delete_test.go:158: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 logs busybox -n default: exit status 1 (89.194504ms) ** stderr ** Error from server (BadRequest): container "busybox" in pod "busybox" is waiting to start: ContainerCreating ** /stderr ** start_stop_delete_test.go:158: kubectl --context crio-20200724220901-14997 logs busybox -n default: exit status 1 start_stop_delete_test.go:158: wait: integration-test=busybox within 8m0s: timed out waiting for the condition helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/DeployApp]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": 
"running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 270617, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:09:15.593531886Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { 
"created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "0d795cb2f39f80073816031303c0e963a2cb0b36d8d4c2994640addd703a558d", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32888" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32887" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32886" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32885" } ] }, "SandboxKey": "/var/run/docker/netns/0d795cb2f39f", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "94d389b211c871d340ba03bea225a73a99c784b3ef382985b8799306d3d827f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "94d389b211c871d340ba03bea225a73a99c784b3ef382985b8799306d3d827f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/DeployApp FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/DeployApp]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25: (1.915981974s) helpers_test.go:245: TestStartStop/group/crio/serial/DeployApp logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:09:16 UTC, end at Fri 2020-07-24 22:25:31 UTC. 
-- * Jul 24 22:25:18 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:18.442754725Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13 NetNS:/proc/19743/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:18 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:18.442789027Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:25:19 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:19.282581792Z" level=info msg="attempting to run pod sandbox with infra container: default/busybox/POD" id=682afd7a-c8c7-4b0b-9d3b-6e0d6a0ef681 * Jul 24 22:25:19 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:19.455063200Z" level=info msg="About to add CNI network lo (type=loopback)" * Jul 24 22:25:19 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:19.459347699Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8 NetNS:/proc/19797/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:19 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:19.459381601Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.595619404Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.595658107Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.595809117Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.602347773Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13 NetNS:/proc/19743/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.602398976Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.602410877Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.692431944Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.87 -j CNI-28bb383a3d9e144e4f3dd402 -m comment --comment name: \"crio-bridge\" id: \"ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-28bb383a3d9e144e4f3dd402':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.692475147Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.87 -j 
CNI-28bb383a3d9e144e4f3dd402 -m comment --comment name: \"crio-bridge\" id: \"ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-28bb383a3d9e144e4f3dd402':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.692537552Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.87 -j CNI-28bb383a3d9e144e4f3dd402 -m comment --comment name: \"crio-bridge\" id: \"ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-28bb383a3d9e144e4f3dd402':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=283f11bd-02c9-41b2-9b15-506168a03e0d * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.508971191Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.509022194Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.509194806Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.513085877Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8 NetNS:/proc/19797/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.513131680Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.513143381Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.596486783Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.88 -j CNI-29b843a1a83022111db4de93 -m comment --comment name: \"crio-bridge\" id: \"041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-29b843a1a83022111db4de93':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.596535287Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.88 -j CNI-29b843a1a83022111db4de93 -m comment --comment name: \"crio-bridge\" id: \"041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-29b843a1a83022111db4de93':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more 
information.\n" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.596611092Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.88 -j CNI-29b843a1a83022111db4de93 -m comment --comment name: \"crio-bridge\" id: \"041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-29b843a1a83022111db4de93':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=682afd7a-c8c7-4b0b-9d3b-6e0d6a0ef681 * Jul 24 22:25:25 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:25.503126758Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=32429087-b1a2-4992-9e76-b8897dbb6873 * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * af0c9e2fb2f0a 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 About a minute ago Exited kindnet-cni 7 06b62a10d0ef5 * cd1d584b7f0b4 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 3 minutes ago Exited storage-provisioner 7 778921bc7c55f * 2be846355cebe ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 14 minutes ago Running kube-proxy 0 d2b29b0195201 * f609b0012ef6d c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 15 minutes ago Running kube-apiserver 0 8b21701a5eb2f * 13967ec1cfcb7 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 15 minutes ago Running kube-controller-manager 0 7c74210db0b18 * d0b0ef8c50a19 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 15 minutes ago Running etcd 0 cfde083312c11 * 99dab5d3aff03 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 15 minutes ago Running kube-scheduler 0 1c739655679ee * * ==> describe nodes <== * Name: crio-20200724220901-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=crio-20200724220901-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=crio-20200724220901-14997 * minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000 * Taints: * Unschedulable: false * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:24:33 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:24:33 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:24:33 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID 
kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:24:33 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.2 * Hostname: crio-20200724220901-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 00e6fc94f1884decb6d7faf4f3cca9d5 * System UUID: 8677386b-5379-4ccc-90e7-5b585098762e * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: cri-o://1.17.3 * Kubelet Version: v1.15.7 * Kube-Proxy Version: v1.15.7 * PodCIDR: 10.244.0.0/24 * Non-terminated Pods: (9 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m3s * kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 14m * kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 14m * kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 14m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 14m kube-proxy, crio-20200724220901-14997 Starting kube-proxy. 
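The readOnlySysFS warning in the Events table above (and the "sysfs is not writable" line in the kube-proxy log further down) comes from a mount-table check: kube-proxy declines to tune conntrack limits when /sys is mounted read-only, as it is inside this container. A minimal sketch of that kind of check, using only the standard library — an illustration, not kube-proxy's actual code:

```go
// Sketch: detect a read-only /sys the way the log's readOnlySysFS warning
// implies -- by reading the mount options for the sysfs entry in /proc/mounts.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func sysfsWritable() (bool, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return false, err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		// /proc/mounts fields: device mountpoint fstype options dump pass
		fields := strings.Fields(s.Text())
		if len(fields) < 4 || fields[1] != "/sys" {
			continue
		}
		for _, opt := range strings.Split(fields[3], ",") {
			if opt == "ro" {
				return false, nil // matches Opts:[ro nosuid nodev noexec relatime] in the log
			}
		}
		return true, nil
	}
	return false, fmt.Errorf("/sys not found in /proc/mounts")
}

func main() {
	ok, err := sysfsWritable()
	fmt.Println("sysfs writable:", ok, "err:", err)
}
```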
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [d0b0ef8c50a1909ed50d4cf31a37f18c4119a48612f57acdefe63fce115d5bbc] <== * 2020-07-24 22:13:06.691097 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (1.183999082s) to execute * 2020-07-24 22:13:06.694425 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (600.61292ms) to execute * 2020-07-24 22:13:06.694450 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/crio-20200724220901-14997\" " with result "range_response_count:1 size:356" took too long (775.303569ms) to execute * 2020-07-24 22:13:06.694460 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (453.465816ms) to execute * 2020-07-24 22:13:06.694555 W | etcdserver: read-only range request "key:\"/registry/daemonsets\" range_end:\"/registry/daemonsett\" count_only:true " with result "range_response_count:0 size:7" took too long (960.560465ms) to execute * 2020-07-24 22:13:13.069379 W | etcdserver: request "header: lease_revoke:" with result "size:28" took too long (916.45026ms) to execute * 2020-07-24 22:13:13.069462 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found") * 2020-07-24 22:13:13.069476 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found") * 2020-07-24 22:13:13.069536 W | etcdserver: read-only range request "key:\"/registry/apiextensions.k8s.io/customresourcedefinitions\" range_end:\"/registry/apiextensions.k8s.io/customresourcedefinitiont\" count_only:true " with result "range_response_count:0 size:5" took too long (1.559649322s) to execute * 2020-07-24 22:13:13.796525 W | 
etcdserver: request "header: lease_revoke:" with result "error:lease not found" took too long (181.88178ms) to execute * 2020-07-24 22:13:13.796570 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found") * 2020-07-24 22:13:31.232246 W | etcdserver: read-only range request "key:\"/registry/leases\" range_end:\"/registry/leaset\" count_only:true " with result "range_response_count:0 size:7" took too long (354.963694ms) to execute * 2020-07-24 22:13:31.232365 W | etcdserver: read-only range request "key:\"/registry/events\" range_end:\"/registry/eventt\" count_only:true " with result "range_response_count:0 size:7" took too long (279.530529ms) to execute * 2020-07-24 22:14:36.955686 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (181.72613ms) to execute * 2020-07-24 22:14:45.950243 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (224.564127ms) to execute * 2020-07-24 22:14:45.950604 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (455.525668ms) to execute * 2020-07-24 22:14:45.950770 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (296.95691ms) to execute * 2020-07-24 22:14:46.860832 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (124.556458ms) to execute * 2020-07-24 22:14:46.860871 W | etcdserver: read-only range request "key:\"/registry/apiregistration.k8s.io/apiservices\" range_end:\"/registry/apiregistration.k8s.io/apiservicet\" count_only:true " with result "range_response_count:0 size:7" took too long (246.55995ms) to execute * 2020-07-24 22:14:46.860887 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/crio-20200724220901-14997\" " with result "range_response_count:1 size:356" took too long (124.582559ms) to execute * 2020-07-24 22:14:46.860969 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (619.55938ms) to execute * 2020-07-24 22:20:27.807681 I | mvcc: store.index: compact 549 * 2020-07-24 22:20:27.808638 I | mvcc: finished scheduled compaction at 549 (took 618.143µs) * 2020-07-24 22:25:27.828726 I | mvcc: store.index: compact 654 * 2020-07-24 22:25:27.829312 I | mvcc: finished scheduled compaction at 654 (took 332.123µs) * * ==> kernel <== * 22:25:32 up 52 min, 0 users, load average: 5.55, 6.98, 7.78 * Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [f609b0012ef6d35e24beac43cf9734c27d6548f09da0af9dcdd690856155d5c3] <== * I0724 22:13:06.694833 1 trace.go:81] Trace[2108251570]: "List etcd3: key=/jobs, resourceVersion=, limit: 500, continue: " (started: 2020-07-24 22:13:06.093334068 +0000 UTC m=+159.350497018) (total time: 601.467477ms): * Trace[2108251570]: [601.467477ms] [601.467477ms] END * I0724 22:13:06.694907 1 trace.go:81] Trace[1240683393]: "Get /apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/crio-20200724220901-14997" (started: 2020-07-24 22:13:05.918684921 +0000 UTC m=+159.175847871) (total time: 776.191529ms): * 
Trace[1240683393]: [776.141025ms] [776.095222ms] About to write a response * I0724 22:13:06.694912 1 trace.go:81] Trace[1232300274]: "List /apis/batch/v1/jobs" (started: 2020-07-24 22:13:06.093273563 +0000 UTC m=+159.350436513) (total time: 601.626088ms): * Trace[1232300274]: [601.583085ms] [601.529581ms] Listing from storage done * I0724 22:13:08.766673 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:13:08.766832 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:08.766909 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:08.766923 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:08.775255 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:28.766807 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:13:28.766933 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:28.767002 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:28.775030 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:48.766978 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:13:48.767147 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:48.767261 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:48.776497 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:08.767132 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:14:08.767258 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:08.767377 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:08.777775 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:45.951213 1 trace.go:81] Trace[369453456]: "GuaranteedUpdate etcd3: *v1.Endpoints" (started: 2020-07-24 22:14:45.030377302 +0000 UTC m=+258.287540252) (total time: 920.796269ms): * Trace[369453456]: [920.769167ms] [919.342375ms] Transaction committed * * ==> kube-controller-manager [13967ec1cfcb7e04177fc3a4bb3390ad4c893aba3aa1f453a274a9e947393268] <== * I0724 22:10:49.553282 1 range_allocator.go:310] Set node crio-20200724220901-14997 PodCIDR to 10.244.0.0/24 * E0724 22:10:49.585268 1 clusterroleaggregation_controller.go:180] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again * E0724 22:10:49.585566 1 clusterroleaggregation_controller.go:180] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again * I0724 22:10:49.586050 1 event.go:258] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"ae9639a0-2be9-470a-89a1-9f9fc0f539e5", APIVersion:"apps/v1", ResourceVersion:"220", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-6wf4w * I0724 22:10:49.586086 1 event.go:258] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", 
Name:"kindnet", UID:"bc740eee-a735-44a2-b533-64fd88dc756e", APIVersion:"apps/v1", ResourceVersion:"233", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-4qfcd * I0724 22:10:49.635568 1 controller_utils.go:1036] Caches are synced for attach detach controller * I0724 22:10:49.724776 1 controller_utils.go:1036] Caches are synced for HPA controller * I0724 22:10:49.955648 1 controller_utils.go:1036] Caches are synced for job controller * I0724 22:10:49.971265 1 controller_utils.go:1036] Caches are synced for taint controller * I0724 22:10:49.971354 1 taint_manager.go:182] Starting NoExecuteTaintManager * I0724 22:10:49.971368 1 node_lifecycle_controller.go:1189] Initializing eviction metric for zone: * W0724 22:10:49.971433 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp. * I0724 22:10:49.971476 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal. * I0724 22:10:49.971486 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller * I0724 22:10:50.130631 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller * I0724 22:10:50.181760 1 controller_utils.go:1036] Caches are synced for disruption controller * I0724 22:10:50.181988 1 disruption.go:338] Sending events to api server. * I0724 22:10:50.188393 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:10:50.189975 1 controller_utils.go:1036] Caches are synced for deployment controller * I0724 22:10:50.193823 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"92d4aefc-f780-4c0c-a563-2ab1476f4694", APIVersion:"apps/v1", ResourceVersion:"340", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-5d4dd4b4db to 1 * I0724 22:10:50.198446 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-5d4dd4b4db", UID:"ab110c4f-65e4-4d3c-987b-c9a844b107b4", APIVersion:"apps/v1", ResourceVersion:"341", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-5d4dd4b4db-9ssg6 * I0724 22:10:50.248567 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:10:50.273968 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:10:50.273996 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:10:50.275749 1 controller_utils.go:1036] Caches are synced for resource quota controller * * ==> kube-proxy [2be846355cebea723ec99bffd78104b575e2937b018759d3884dc0c79bb92ea0] <== * W0724 22:10:51.541191 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy * I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier. 
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7 * I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:10:51.749315 1 config.go:187] Starting service config controller * I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller * I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller * I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller * * ==> kube-scheduler [99dab5d3aff03ed6049a58c4f600d958b17db3a0f7df5c9c68abab533c8b9dfa] <== * I0724 22:10:28.185753 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:10:28.186676 1 authorization.go:47] Authorization is disabled * W0724 22:10:28.186691 1 authentication.go:55] Authentication is disabled * I0724 22:10:28.186705 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:10:28.187130 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:10:31.838038 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:10:31.850274 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope * E0724 22:10:31.851076 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:10:31.851166 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:31.935670 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:31.937897 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:31.938070 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:31.940942 1 reflector.go:125] 
k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:31.942934 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:31.946983 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:10:32.840099 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:10:32.851310 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope * E0724 22:10:32.936974 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:09:16 UTC, end at Fri 2020-07-24 22:25:32 UTC. 
-- * Jul 24 22:25:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:03.282704 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:25:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:03.316306 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(1058a2a3a0bfc9c0d02f51164409f356aca6d1a7850b3b94b4addd043726f805): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:03.316407 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(1058a2a3a0bfc9c0d02f51164409f356aca6d1a7850b3b94b4addd043726f805): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:03.316432 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(1058a2a3a0bfc9c0d02f51164409f356aca6d1a7850b3b94b4addd043726f805): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:03 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:03.316491 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(1058a2a3a0bfc9c0d02f51164409f356aca6d1a7850b3b94b4addd043726f805): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:06 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:06.703621 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(da6562e101e3b7a58c5ba8294a32c4bb428039a5fbb1281b7511b9d840ce35bd): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:06 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:06.703687 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(da6562e101e3b7a58c5ba8294a32c4bb428039a5fbb1281b7511b9d840ce35bd): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:06 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:06.703711 2275 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(da6562e101e3b7a58c5ba8294a32c4bb428039a5fbb1281b7511b9d840ce35bd): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:06 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:06.703760 2275 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(da6562e101e3b7a58c5ba8294a32c4bb428039a5fbb1281b7511b9d840ce35bd): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:12 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:12.283166 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:25:14 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:14.282639 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892202 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892276 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892311 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network 
sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892426 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813561 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813630 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813654 2275 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813707 2275 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.403870 2275 manager.go:1084] Failed to create existing container: /kubepods/besteffort/pod111a1e02ecfd8e373f0c9f774a428541/crio-cfde083312c1168171f556afd9f9d427052d7f2b24bd0882af47a77d42058755: Error finding container cfde083312c1168171f556afd9f9d427052d7f2b24bd0882af47a77d42058755: Status 404 returned error 
&{%!s(*http.body=&{0xc0011a82c0 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.407603 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/podd56135b6f61d5db3f635e70693e7224d/crio-1c739655679ee845e51f5144c1084d6dfd219bb19e3eed0336cafc28ace6dde8: Error finding container 1c739655679ee845e51f5144c1084d6dfd219bb19e3eed0336cafc28ace6dde8: Status 404 returned error &{%!s(*http.body=&{0xc000dfbd60 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.408738 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/poda6564eecd841ea73dca56559a73a98cf/crio-7c74210db0b18ed89e977abd92469c112cabeae1e18ff5a27d9bc1e4a2d586b3: Error finding container 7c74210db0b18ed89e977abd92469c112cabeae1e18ff5a27d9bc1e4a2d586b3: Status 404 returned error &{%!s(*http.body=&{0xc00112dc00 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.410290 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/pod3fa5678dc40111ec8016c600a161dae3/crio-8b21701a5eb2f6af152a40dcc25e4c05e30e6a13c60607c3f6613007a489942a: Error finding container 8b21701a5eb2f6af152a40dcc25e4c05e30e6a13c60607c3f6613007a489942a: Status 404 returned error &{%!s(*http.body=&{0xc000ce1fe0 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:27 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:27.282404 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:25:28 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:28.282803 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * * ==> storage-provisioner [cd1d584b7f0b403a35f252f8a87cf7721e803ff8bb98e485d54de30f5dd9a643] <== * F0724 22:22:10.732535 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/DeployApp]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl 
--context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6: exit status 1 (92.755652ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:17:30 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-2jsfl: Type: Secret (a volume populated by a Secret) SecretName: default-token-2jsfl Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 8m4s default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997 Warning FailedCreatePodSandBox 8m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m47s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m32s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m19s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m1s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m48s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning 
FailedCreatePodSandBox 6m34s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m17s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m1s kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 105s (x17 over 5m48s) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found ** /stderr ** helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6: exit status 1 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/DeployApp]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 270617, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:09:15.593531886Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, 
"NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93ff
f3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" }, { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "0d795cb2f39f80073816031303c0e963a2cb0b36d8d4c2994640addd703a558d", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32888" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32887" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32886" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32885" } ] }, "SandboxKey": "/var/run/docker/netns/0d795cb2f39f", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "94d389b211c871d340ba03bea225a73a99c784b3ef382985b8799306d3d827f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "94d389b211c871d340ba03bea225a73a99c784b3ef382985b8799306d3d827f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/DeployApp FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/DeployApp]: minikube logs <====== 
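One note before the post-mortem logs below: every FailedCreatePodSandBox event in the describe output above, and every sandbox error in the CRI-O and kubelet logs that follow, reduces to the same line from the CNI bridge plugin: failed to set bridge addr: could not add IP address to "cni0": permission denied. That message is the plugin wrapping an EPERM from a netlink address-add on the bridge device, which typically means the calling process lacks CAP_NET_ADMIN in its network namespace, though this log alone does not settle why the nested container is denied. As a hedged illustration only (this is not minikube or CNI source, and not part of the captured output), the Go sketch below reconstructs the operation being denied using github.com/vishvananda/netlink, which the upstream containernetworking bridge plugin builds on; the bridge name cni0 is taken from the errors, the 10.244.0.1/24 address is an assumption derived from the node's PodCIDR (10.244.0.0/24) shown in the describe-nodes output.

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// Hypothetical reconstruction: look up the bridge device named in the
	// failing log lines. The real plugin creates "cni0" if it is absent.
	br, err := netlink.LinkByName("cni0")
	if err != nil {
		log.Fatalf("bridge lookup: %v", err)
	}

	// Assumed address: the gateway of the node's PodCIDR normally lands
	// on the bridge itself.
	addr, err := netlink.ParseAddr("10.244.0.1/24")
	if err != nil {
		log.Fatalf("parse addr: %v", err)
	}

	// This is the operation being denied in the logs. Without
	// CAP_NET_ADMIN in the current network namespace, AddrAdd returns
	// EPERM, which the bridge plugin surfaces as:
	//   failed to set bridge addr: could not add IP address to "cni0": permission denied
	if err := netlink.AddrAdd(br, addr); err != nil {
		log.Fatalf("could not add IP address to %q: %v", "cni0", err)
	}
	log.Println("address configured on cni0")
}

Run with CAP_NET_ADMIN the AddrAdd call succeeds; in an environment that denies it, it fails exactly as in the entries on both sides of this point. The sketch only pins down which syscall path the repeated error comes from; whether the denial here stems from a dropped capability, the nested namespaces of the kic container, or something else is not established by this log.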
helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25: (5.432697842s) helpers_test.go:245: TestStartStop/group/crio/serial/DeployApp logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:09:16 UTC, end at Fri 2020-07-24 22:25:34 UTC. -- * Jul 24 22:25:19 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:19.459381601Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.595619404Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.595658107Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.595809117Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.602347773Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13 NetNS:/proc/19743/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.602398976Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.602410877Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.692431944Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.87 -j CNI-28bb383a3d9e144e4f3dd402 -m comment --comment name: \"crio-bridge\" id: \"ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-28bb383a3d9e144e4f3dd402':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.692475147Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.87 -j CNI-28bb383a3d9e144e4f3dd402 -m comment --comment name: \"crio-bridge\" id: \"ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-28bb383a3d9e144e4f3dd402':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:20 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:20.692537552Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.87 -j CNI-28bb383a3d9e144e4f3dd402 -m comment --comment name: \"crio-bridge\" id: 
\"ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-28bb383a3d9e144e4f3dd402':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=283f11bd-02c9-41b2-9b15-506168a03e0d * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.508971191Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.509022194Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.509194806Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.513085877Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8 NetNS:/proc/19797/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.513131680Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.513143381Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.596486783Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.88 -j CNI-29b843a1a83022111db4de93 -m comment --comment name: \"crio-bridge\" id: \"041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-29b843a1a83022111db4de93':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.596535287Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.88 -j CNI-29b843a1a83022111db4de93 -m comment --comment name: \"crio-bridge\" id: \"041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-29b843a1a83022111db4de93':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:25:21 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:21.596611092Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.88 -j CNI-29b843a1a83022111db4de93 -m comment --comment name: \"crio-bridge\" id: \"041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-29b843a1a83022111db4de93':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=682afd7a-c8c7-4b0b-9d3b-6e0d6a0ef681 * Jul 24 22:25:25 
crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:25.503126758Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=32429087-b1a2-4992-9e76-b8897dbb6873 * Jul 24 22:25:33 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:33.282710016Z" level=info msg="attempting to run pod sandbox with infra container: default/busybox/POD" id=868999a3-8c23-4f0e-82ed-f6d82b93f8fe * Jul 24 22:25:33 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:33.451892385Z" level=info msg="About to add CNI network lo (type=loopback)" * Jul 24 22:25:33 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:33.456804226Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:c24ad7780a5b9fa736a60c20ccba9211ff79b023e7df86150493f1579719da16 NetNS:/proc/20253/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:25:33 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:33.456840829Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:25:34 crio-20200724220901-14997 crio[3496]: time="2020-07-24 22:25:34.283377117Z" level=info msg="attempting to run pod sandbox with infra container: kube-system/coredns-5d4dd4b4db-9ssg6/POD" id=0b3d77fc-8b35-4770-b889-df4c6f73f84e * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * af0c9e2fb2f0a 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 About a minute ago Exited kindnet-cni 7 06b62a10d0ef5 * cd1d584b7f0b4 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 3 minutes ago Exited storage-provisioner 7 778921bc7c55f * 2be846355cebe ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 14 minutes ago Running kube-proxy 0 d2b29b0195201 * f609b0012ef6d c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 15 minutes ago Running kube-apiserver 0 8b21701a5eb2f * 13967ec1cfcb7 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 15 minutes ago Running kube-controller-manager 0 7c74210db0b18 * d0b0ef8c50a19 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 15 minutes ago Running etcd 0 cfde083312c11 * 99dab5d3aff03 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 15 minutes ago Running kube-scheduler 0 1c739655679ee * * ==> describe nodes <== * Name: crio-20200724220901-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=crio-20200724220901-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=crio-20200724220901-14997 * minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000 * Taints: * Unschedulable: false * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:25:34 +0000 Fri, 24 Jul 
2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:25:34 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:25:34 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:25:34 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.2 * Hostname: crio-20200724220901-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 00e6fc94f1884decb6d7faf4f3cca9d5 * System UUID: 8677386b-5379-4ccc-90e7-5b585098762e * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: cri-o://1.17.3 * Kubelet Version: v1.15.7 * Kube-Proxy Version: v1.15.7 * PodCIDR: 10.244.0.0/24 * Non-terminated Pods: (9 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m6s * kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 14m * kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 14m * kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 14m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 14m kube-proxy, crio-20200724220901-14997 Starting kube-proxy. 
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [d0b0ef8c50a1909ed50d4cf31a37f18c4119a48612f57acdefe63fce115d5bbc] <== * 2020-07-24 22:13:06.691097 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (1.183999082s) to execute * 2020-07-24 22:13:06.694425 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (600.61292ms) to execute * 2020-07-24 22:13:06.694450 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/crio-20200724220901-14997\" " with result "range_response_count:1 size:356" took too long (775.303569ms) to execute * 2020-07-24 22:13:06.694460 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (453.465816ms) to execute * 2020-07-24 22:13:06.694555 W | etcdserver: read-only range request "key:\"/registry/daemonsets\" range_end:\"/registry/daemonsett\" count_only:true " with result "range_response_count:0 size:7" took too long (960.560465ms) to execute * 2020-07-24 22:13:13.069379 W | etcdserver: request "header: lease_revoke:" with result "size:28" took too long (916.45026ms) to execute * 2020-07-24 22:13:13.069462 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found") * 2020-07-24 22:13:13.069476 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found") * 2020-07-24 22:13:13.069536 W | etcdserver: read-only range request "key:\"/registry/apiextensions.k8s.io/customresourcedefinitions\" range_end:\"/registry/apiextensions.k8s.io/customresourcedefinitiont\" count_only:true " with result "range_response_count:0 size:5" took too long (1.559649322s) to execute * 2020-07-24 22:13:13.796525 W | 
etcdserver: request "header: lease_revoke:" with result "error:lease not found" took too long (181.88178ms) to execute * 2020-07-24 22:13:13.796570 W | etcdserver: failed to revoke 3c247382e0a1a43a ("lease not found") * 2020-07-24 22:13:31.232246 W | etcdserver: read-only range request "key:\"/registry/leases\" range_end:\"/registry/leaset\" count_only:true " with result "range_response_count:0 size:7" took too long (354.963694ms) to execute * 2020-07-24 22:13:31.232365 W | etcdserver: read-only range request "key:\"/registry/events\" range_end:\"/registry/eventt\" count_only:true " with result "range_response_count:0 size:7" took too long (279.530529ms) to execute * 2020-07-24 22:14:36.955686 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (181.72613ms) to execute * 2020-07-24 22:14:45.950243 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (224.564127ms) to execute * 2020-07-24 22:14:45.950604 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (455.525668ms) to execute * 2020-07-24 22:14:45.950770 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (296.95691ms) to execute * 2020-07-24 22:14:46.860832 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (124.556458ms) to execute * 2020-07-24 22:14:46.860871 W | etcdserver: read-only range request "key:\"/registry/apiregistration.k8s.io/apiservices\" range_end:\"/registry/apiregistration.k8s.io/apiservicet\" count_only:true " with result "range_response_count:0 size:7" took too long (246.55995ms) to execute * 2020-07-24 22:14:46.860887 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/crio-20200724220901-14997\" " with result "range_response_count:1 size:356" took too long (124.582559ms) to execute * 2020-07-24 22:14:46.860969 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (619.55938ms) to execute * 2020-07-24 22:20:27.807681 I | mvcc: store.index: compact 549 * 2020-07-24 22:20:27.808638 I | mvcc: finished scheduled compaction at 549 (took 618.143µs) * 2020-07-24 22:25:27.828726 I | mvcc: store.index: compact 654 * 2020-07-24 22:25:27.829312 I | mvcc: finished scheduled compaction at 654 (took 332.123µs) * * ==> kernel <== * 22:25:37 up 52 min, 0 users, load average: 6.07, 7.06, 7.80 * Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [f609b0012ef6d35e24beac43cf9734c27d6548f09da0af9dcdd690856155d5c3] <== * I0724 22:13:06.694833 1 trace.go:81] Trace[2108251570]: "List etcd3: key=/jobs, resourceVersion=, limit: 500, continue: " (started: 2020-07-24 22:13:06.093334068 +0000 UTC m=+159.350497018) (total time: 601.467477ms): * Trace[2108251570]: [601.467477ms] [601.467477ms] END * I0724 22:13:06.694907 1 trace.go:81] Trace[1240683393]: "Get /apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/crio-20200724220901-14997" (started: 2020-07-24 22:13:05.918684921 +0000 UTC m=+159.175847871) (total time: 776.191529ms): * 
Trace[1240683393]: [776.141025ms] [776.095222ms] About to write a response * I0724 22:13:06.694912 1 trace.go:81] Trace[1232300274]: "List /apis/batch/v1/jobs" (started: 2020-07-24 22:13:06.093273563 +0000 UTC m=+159.350436513) (total time: 601.626088ms): * Trace[1232300274]: [601.583085ms] [601.529581ms] Listing from storage done * I0724 22:13:08.766673 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:13:08.766832 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:08.766909 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:08.766923 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:08.775255 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:28.766807 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:13:28.766933 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:28.767002 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:28.775030 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:48.766978 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:13:48.767147 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:48.767261 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:13:48.776497 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:08.767132 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:14:08.767258 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:08.767377 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:08.777775 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:14:45.951213 1 trace.go:81] Trace[369453456]: "GuaranteedUpdate etcd3: *v1.Endpoints" (started: 2020-07-24 22:14:45.030377302 +0000 UTC m=+258.287540252) (total time: 920.796269ms): * Trace[369453456]: [920.769167ms] [919.342375ms] Transaction committed * * ==> kube-controller-manager [13967ec1cfcb7e04177fc3a4bb3390ad4c893aba3aa1f453a274a9e947393268] <== * I0724 22:10:49.553282 1 range_allocator.go:310] Set node crio-20200724220901-14997 PodCIDR to 10.244.0.0/24 * E0724 22:10:49.585268 1 clusterroleaggregation_controller.go:180] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again * E0724 22:10:49.585566 1 clusterroleaggregation_controller.go:180] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again * I0724 22:10:49.586050 1 event.go:258] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"ae9639a0-2be9-470a-89a1-9f9fc0f539e5", APIVersion:"apps/v1", ResourceVersion:"220", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-6wf4w * I0724 22:10:49.586086 1 event.go:258] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", 
Name:"kindnet", UID:"bc740eee-a735-44a2-b533-64fd88dc756e", APIVersion:"apps/v1", ResourceVersion:"233", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-4qfcd * I0724 22:10:49.635568 1 controller_utils.go:1036] Caches are synced for attach detach controller * I0724 22:10:49.724776 1 controller_utils.go:1036] Caches are synced for HPA controller * I0724 22:10:49.955648 1 controller_utils.go:1036] Caches are synced for job controller * I0724 22:10:49.971265 1 controller_utils.go:1036] Caches are synced for taint controller * I0724 22:10:49.971354 1 taint_manager.go:182] Starting NoExecuteTaintManager * I0724 22:10:49.971368 1 node_lifecycle_controller.go:1189] Initializing eviction metric for zone: * W0724 22:10:49.971433 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp. * I0724 22:10:49.971476 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal. * I0724 22:10:49.971486 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller * I0724 22:10:50.130631 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller * I0724 22:10:50.181760 1 controller_utils.go:1036] Caches are synced for disruption controller * I0724 22:10:50.181988 1 disruption.go:338] Sending events to api server. * I0724 22:10:50.188393 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:10:50.189975 1 controller_utils.go:1036] Caches are synced for deployment controller * I0724 22:10:50.193823 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"92d4aefc-f780-4c0c-a563-2ab1476f4694", APIVersion:"apps/v1", ResourceVersion:"340", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-5d4dd4b4db to 1 * I0724 22:10:50.198446 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-5d4dd4b4db", UID:"ab110c4f-65e4-4d3c-987b-c9a844b107b4", APIVersion:"apps/v1", ResourceVersion:"341", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-5d4dd4b4db-9ssg6 * I0724 22:10:50.248567 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:10:50.273968 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:10:50.273996 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:10:50.275749 1 controller_utils.go:1036] Caches are synced for resource quota controller * * ==> kube-proxy [2be846355cebea723ec99bffd78104b575e2937b018759d3884dc0c79bb92ea0] <== * W0724 22:10:51.541191 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy * I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier. 
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7 * I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:10:51.749315 1 config.go:187] Starting service config controller * I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller * I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller * I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller * * ==> kube-scheduler [99dab5d3aff03ed6049a58c4f600d958b17db3a0f7df5c9c68abab533c8b9dfa] <== * I0724 22:10:28.185753 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:10:28.186676 1 authorization.go:47] Authorization is disabled * W0724 22:10:28.186691 1 authentication.go:55] Authentication is disabled * I0724 22:10:28.186705 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:10:28.187130 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:10:31.838038 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:10:31.850274 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope * E0724 22:10:31.851076 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:10:31.851166 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:31.935670 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:31.937897 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:31.938070 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:31.940942 1 reflector.go:125] 
k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:31.942934 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:31.946983 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:10:32.840099 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:10:32.851310 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope * E0724 22:10:32.936974 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:09:16 UTC, end at Fri 2020-07-24 22:25:38 UTC. 
-- * Jul 24 22:25:06 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:06.703760 2275 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(da6562e101e3b7a58c5ba8294a32c4bb428039a5fbb1281b7511b9d840ce35bd): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:12 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:12.283166 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:25:14 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:14.282639 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892202 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892276 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892311 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:20 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:20.892426 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod 
\"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(ed614af69c11ccb4ed4667c9a00677c5ca9b11c4f258b7ee9e2fa17df0a9cf13): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813561 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813630 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813654 2275 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:21 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:21.813707 2275 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(041531438a4661c02d3ccd98e702e9460082208bc96ab684486f048904597ef8): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.403870 2275 manager.go:1084] Failed to create existing container: /kubepods/besteffort/pod111a1e02ecfd8e373f0c9f774a428541/crio-cfde083312c1168171f556afd9f9d427052d7f2b24bd0882af47a77d42058755: Error finding container cfde083312c1168171f556afd9f9d427052d7f2b24bd0882af47a77d42058755: Status 404 returned error &{%!s(*http.body=&{0xc0011a82c0 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.407603 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/podd56135b6f61d5db3f635e70693e7224d/crio-1c739655679ee845e51f5144c1084d6dfd219bb19e3eed0336cafc28ace6dde8: Error finding container 1c739655679ee845e51f5144c1084d6dfd219bb19e3eed0336cafc28ace6dde8: Status 404 returned error &{%!s(*http.body=&{0xc000dfbd60 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) 
%!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.408738 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/poda6564eecd841ea73dca56559a73a98cf/crio-7c74210db0b18ed89e977abd92469c112cabeae1e18ff5a27d9bc1e4a2d586b3: Error finding container 7c74210db0b18ed89e977abd92469c112cabeae1e18ff5a27d9bc1e4a2d586b3: Status 404 returned error &{%!s(*http.body=&{0xc00112dc00 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:24 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:24.410290 2275 manager.go:1084] Failed to create existing container: /kubepods/burstable/pod3fa5678dc40111ec8016c600a161dae3/crio-8b21701a5eb2f6af152a40dcc25e4c05e30e6a13c60607c3f6613007a489942a: Error finding container 8b21701a5eb2f6af152a40dcc25e4c05e30e6a13c60607c3f6613007a489942a: Status 404 returned error &{%!s(*http.body=&{0xc000ce1fe0 false false {0 0} false false false }) {%!s(int32=0) %!s(uint32=0)} %!s(bool=false) %!s(func(error) error=0x737ab0) %!s(func() error=0x737a40)} * Jul 24 22:25:27 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:27.282404 2275 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:25:28 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:28.282803 2275 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:25:35 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:35.920175 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(c24ad7780a5b9fa736a60c20ccba9211ff79b023e7df86150493f1579719da16): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:35 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:35.920238 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(c24ad7780a5b9fa736a60c20ccba9211ff79b023e7df86150493f1579719da16): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:35 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:35.920262 2275 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(c24ad7780a5b9fa736a60c20ccba9211ff79b023e7df86150493f1579719da16): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:35 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:35.920312 2275 
pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(c24ad7780a5b9fa736a60c20ccba9211ff79b023e7df86150493f1579719da16): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:25:36 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:36.731249 2275 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(89657f415f677b45c39a803cca6bd996ac1b3bbeebaf40b2018269e6ca8c7fe9): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:36 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:36.731311 2275 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(89657f415f677b45c39a803cca6bd996ac1b3bbeebaf40b2018269e6ca8c7fe9): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:36 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:36.731335 2275 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(89657f415f677b45c39a803cca6bd996ac1b3bbeebaf40b2018269e6ca8c7fe9): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:25:36 crio-20200724220901-14997 kubelet[2275]: E0724 22:25:36.731531 2275 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(89657f415f677b45c39a803cca6bd996ac1b3bbeebaf40b2018269e6ca8c7fe9): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [cd1d584b7f0b403a35f252f8a87cf7721e803ff8bb98e485d54de30f5dd9a643] <== * F0724 22:22:10.732535 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox 
coredns-5d4dd4b4db-9ssg6
helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/DeployApp]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6: exit status 1 (88.478854ms)
-- stdout --
Name:           busybox
Namespace:      default
Priority:       0
Node:           crio-20200724220901-14997/172.17.0.2
Start Time:     Fri, 24 Jul 2020 22:17:30 +0000
Labels:         integration-test=busybox
Annotations:    <none>
Status:         Pending
IP:
IPs:            <none>
Containers:
  busybox:
    Container ID:
    Image:          busybox:1.28.4-glibc
    Image ID:
    Port:           <none>
    Host Port:      <none>
    Command:
      sleep
      3600
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  default-token-2jsfl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-2jsfl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason                  Age                    From                                Message
  ----     ------                  ----                   ----                                -------
  Normal   Scheduled               8m10s                  default-scheduler                   Successfully assigned default/busybox to crio-20200724220901-14997
  Warning  FailedCreatePodSandBox  8m6s                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  7m53s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  7m38s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  7m25s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  7m7s                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6m54s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6m40s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6m23s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6m7s                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  111s (x17 over 5m54s)  kubelet, crio-20200724220901-14997  (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found
** /stderr **
helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6: exit status 1
=== RUN TestStartStop/group/crio/serial/Stop
start_stop_delete_test.go:164: (dbg) Run: ./minikube-linux-amd64 stop -p crio-20200724220901-14997 --alsologtostderr -v=3
start_stop_delete_test.go:164: (dbg) Done: ./minikube-linux-amd64 stop -p crio-20200724220901-14997 --alsologtostderr -v=3: (21.098286131s)
=== RUN TestStartStop/group/crio/serial/EnableAddonAfterStop
start_stop_delete_test.go:174: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
start_stop_delete_test.go:174: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997: exit status 7 (121.577163ms)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:174: status error: exit status 7 (may be ok)
start_stop_delete_test.go:181: (dbg) Run: ./minikube-linux-amd64 addons enable dashboard -p crio-20200724220901-14997
=== RUN TestStartStop/group/crio/serial/SecondStart
start_stop_delete_test.go:190: (dbg) Run: ./minikube-linux-amd64 start -p crio-20200724220901-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=crio --disable-driver-mounts --extra-config=kubeadm.ignore-preflight-errors=SystemVerification --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.15.7
=== CONT TestStartStop/group/containerd/serial/DeployApp
start_stop_delete_test.go:158: ***** TestStartStop/group/containerd/serial/DeployApp: pod "integration-test=busybox" failed to start within 8m0s: timed out waiting for the
condition **** start_stop_delete_test.go:158: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 start_stop_delete_test.go:158: TestStartStop/group/containerd/serial/DeployApp: showing logs for failed pods as of 2020-07-24 22:27:40.804165078 +0000 UTC m=+3090.928529563 start_stop_delete_test.go:158: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe po busybox -n default start_stop_delete_test.go:158: (dbg) kubectl --context containerd-20200724221200-14997 describe po busybox -n default: Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.5 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 8m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 7m58s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m44s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m27s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m11s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m57s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m44s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address 
to "cni0": permission denied Warning FailedCreatePodSandBox 6m27s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m12s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 111s (x17 over 5m47s) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied start_stop_delete_test.go:158: (dbg) Run: kubectl --context containerd-20200724221200-14997 logs busybox -n default start_stop_delete_test.go:158: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 logs busybox -n default: exit status 1 (94.551088ms) ** stderr ** Error from server (BadRequest): container "busybox" in pod "busybox" is waiting to start: ContainerCreating ** /stderr ** start_stop_delete_test.go:158: kubectl --context containerd-20200724221200-14997 logs busybox -n default: exit status 1 start_stop_delete_test.go:158: wait: integration-test=busybox within 8m0s: timed out waiting for the condition helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/DeployApp]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 338511, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:12:10.40831313Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", 
"MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, 
"Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "3e3fc4759cbd4070ed5dddc0264a24f411b397aab6336237e6416cbda2769e84", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32900" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32899" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32898" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32897" } ] }, "SandboxKey": "/var/run/docker/netns/3e3fc4759cbd", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "1a1c80297b8f177f80b8e0fe79832f4492796767c95941e75116c272b2743f88", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:05", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "1a1c80297b8f177f80b8e0fe79832f4492796767c95941e75116c272b2743f88", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:05", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/DeployApp FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/DeployApp]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25: (2.083437135s) helpers_test.go:245: TestStartStop/group/containerd/serial/DeployApp logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * ea143e20f0dbe 2186a1a396deb 55 seconds ago Exited kindnet-cni 7 980558efef9f1 * ac782f72581f0 4689081edb103 3 minutes ago Exited storage-provisioner 7 92684e9147786 * 003518d48f7b6 3439b7546f29b 14 minutes ago Running kube-proxy 0 ca6897a07eae7 * 6f125fe745aef da26705ccb4b5 14 minutes ago Running kube-controller-manager 1 e257c0c495dd3 * 3b03abf8faaef 7e28efa976bd1 14 minutes ago Running kube-apiserver 0 c2da5c12ff9ea * 6dc3461120ba7 da26705ccb4b5 14 minutes ago Exited kube-controller-manager 0 e257c0c495dd3 * bff121f454668 76216c34ed0c7 15 minutes ago Running kube-scheduler 0 4c3ff6ff63ddc * db0fd28c536d4 303ce5db0e90d 15 minutes ago Running etcd 0 0de406b420c86 * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:12:25 UTC, end at Fri 2020-07-24 22:27:41 UTC. 
-- * Jul 24 22:26:46 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:46.246672012Z" level=info msg="StartContainer for \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\"" * Jul 24 22:26:46 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:46.260677587Z" level=info msg="shim containerd-shim started" address=/containerd-shim/c633b307d0c204b6fa9f41a779509b5d18e528ccf5677f6b7e4342c476cf33f5.sock debug=false pid=7251 * Jul 24 22:26:46 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:46.519914434Z" level=info msg="StartContainer for \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\" returns successfully" * Jul 24 22:26:55 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:55.957521429Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:26:55 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:55.957521929Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:26:57 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:57.937878775Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"01d5df04474fd2db24b4cc66aaa24be74546a453d756393223831c13b7260df4\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:26:58 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:58.151185533Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"32795834de6daa60922e8c7c9190d85a53d1c88f597635ffb382ffede60aa440\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.221648013Z" level=info msg="Finish piping stderr of container \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\"" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.221674015Z" level=info msg="Finish piping stdout of container \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\"" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.222933503Z" level=info msg="TaskExit event &TaskExit{ContainerID:ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14,ID:ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14,Pid:7269,ExitStatus:2,ExitedAt:2020-07-24 22:27:06.222720988 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.266269337Z" level=info msg="shim reaped" id=ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14 * Jul 24 22:27:07 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:07.150357731Z" level=info msg="RemoveContainer for \"2e361d204abdd65431889780d3e7fb0bb69e31c84d86b350692ee6f2e10d6c87\"" * Jul 24 22:27:07 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:07.187292716Z" level=info msg="RemoveContainer for 
\"2e361d204abdd65431889780d3e7fb0bb69e31c84d86b350692ee6f2e10d6c87\" returns successfully" * Jul 24 22:27:09 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:09.957516843Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:27:10 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:10.957595606Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:27:12 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:12.082832788Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"b8a54fb33593acec55fab723d614ab412e0c5139ee6fdb8f67815c9f233fa747\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:12 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:12.882178169Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"7e8b1407230268c6869d4a73c2b33975e2390f2b24fc9f3cd9105f3cfdd1796b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:23 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:23.957417834Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:27:25 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:25.870161263Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:25 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:25.957540752Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:27:27 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:27.602669298Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:36.957619331Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:27:37 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:37.957508308Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:27:38 containerd-20200724221200-14997 containerd[364]: 
time="2020-07-24T22:27:38.998067020Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:39 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:39.950846515Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:27:41 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.5 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: a912f43aab5e4ba59b29e84664ffc131 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (9 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m2s * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi 
(0%) 14m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 14m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 15m (x6 over 15m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 15m (x6 over 15m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 15m (x5 over 15m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal Starting 14m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 14m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 14m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 14m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Warning readOnlySysFS 14m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 14m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
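A note on the pattern above (editorial, not part of the captured log): every FailedCreatePodSandBox in this run, on both the crio and containerd profiles, bottoms out in the same operation — the CNI bridge plugin trying to assign the pod-network gateway address to the cni0 bridge and getting "permission denied". Together with the readOnlySysFS warning in the node events, this suggests the nested "node" container lacks the network privileges the plugin expects, rather than a fault in either runtime. The sketch below is a hypothetical diagnostic, not part of the test suite: it assumes the github.com/vishvananda/netlink package (the same netlink library the CNI plugins build on), and the bridge name cni0 and gateway 10.244.0.1/24 are inferred from the PodCIDR 10.244.0.0/24 shown in "describe nodes" above. Run inside the node container, it should fail with EPERM unless CAP_NET_ADMIN is effective there.

// cnicheck.go — hypothetical repro: attempt the same address assignment
// the CNI bridge plugin performs when it sets up a pod sandbox.
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// Bridge name and gateway CIDR are assumptions taken from this log;
	// adjust them if the node's PodCIDR differs.
	link, err := netlink.LinkByName("cni0")
	if err != nil {
		log.Fatalf("bridge lookup failed: %v", err)
	}
	addr, err := netlink.ParseAddr("10.244.0.1/24")
	if err != nil {
		log.Fatalf("bad CIDR: %v", err)
	}
	// Without CAP_NET_ADMIN this returns EPERM — the "could not add IP
	// address to \"cni0\": permission denied" wrapped into every sandbox
	// failure event above.
	if err := netlink.AddrAdd(link, addr); err != nil {
		log.Fatalf("could not add IP address to cni0: %v", err)
	}
	fmt.Println("address added: this container can modify cni0")
}

If the EPERM reproduces, the fix would belong in how the kicbase node container is launched (privileged mode or added capabilities), not in the CNI configuration itself.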
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [db0fd28c536d4d051abb0c3b7d4219ed9ad43fae8b4e05f52f322ed31bd12c02] <== * 2020-07-24 22:12:56.812574 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:0 size:4" took too long (155.514884ms) to execute * 2020-07-24 22:12:56.812662 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:262" took too long (183.360374ms) to execute * 2020-07-24 22:12:57.044210 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:4" took too long (129.896221ms) to execute * 2020-07-24 22:12:57.044242 W | etcdserver: read-only range request "key:\"/registry/clusterroles/system:aggregate-to-view\" " with result "range_response_count:0 size:4" took too long (228.539655ms) to execute * 2020-07-24 22:12:57.044266 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:0 size:4" took too long (178.276384ms) to execute * 2020-07-24 22:12:57.044278 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-node-lease\" " with result "range_response_count:0 size:4" took too long (227.465694ms) to execute * 2020-07-24 22:13:15.434077 W | wal: sync duration of 3.834899995s, expected less than 1s * 2020-07-24 22:13:15.471535 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:5503" took too long (4.51397778s) to execute * 2020-07-24 22:13:15.471572 W | etcdserver: request "header: txn: success:> failure:<>>" with result "size:16" took too long (3.872288817s) to execute * 2020-07-24 22:13:15.471926 W | etcdserver: 
read-only range request "key:\"/registry/serviceaccounts/default/default\" " with result "range_response_count:0 size:5" took too long (4.245793609s) to execute * 2020-07-24 22:13:15.471957 W | etcdserver: read-only range request "key:\"/registry/minions/\" range_end:\"/registry/minions0\" " with result "range_response_count:1 size:5367" took too long (2.908773208s) to execute * 2020-07-24 22:13:30.080019 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (518.961173ms) to execute * 2020-07-24 22:13:31.304192 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (430.352557ms) to execute * 2020-07-24 22:13:31.304379 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (347.781631ms) to execute * 2020-07-24 22:14:39.084771 W | wal: sync duration of 1.885899215s, expected less than 1s * 2020-07-24 22:14:39.085392 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (752.73234ms) to execute * 2020-07-24 22:14:39.204221 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (526.93962ms) to execute * 2020-07-24 22:14:39.204593 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (314.821685ms) to execute * 2020-07-24 22:22:43.570684 I | mvcc: store.index: compact 568 * 2020-07-24 22:22:43.571598 I | mvcc: finished scheduled compaction at 568 (took 585.341µs) * 2020-07-24 22:26:14.677739 W | etcdserver: read-only range request "key:\"/registry/pods/default/\" range_end:\"/registry/pods/default0\" " with result "range_response_count:1 size:2036" took too long (258.17647ms) to execute * 2020-07-24 22:26:29.551172 W | etcdserver: read-only range request "key:\"/registry/pods/default/\" range_end:\"/registry/pods/default0\" " with result "range_response_count:1 size:2036" took too long (131.671395ms) to execute * 2020-07-24 22:26:43.365199 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy\" range_end:\"/registry/podsecuritypolicz\" count_only:true " with result "range_response_count:0 size:5" took too long (416.944167ms) to execute * 2020-07-24 22:26:43.365460 W | etcdserver: read-only range request "key:\"/registry/ingress\" range_end:\"/registry/ingrest\" count_only:true " with result "range_response_count:0 size:5" took too long (232.243591ms) to execute * 2020-07-24 22:27:23.173084 W | etcdserver: read-only range request "key:\"/registry/daemonsets\" range_end:\"/registry/daemonsett\" count_only:true " with result "range_response_count:0 size:7" took too long (166.661817ms) to execute * * ==> kernel <== * 22:27:43 up 55 min, 0 users, load average: 7.04, 6.98, 7.67 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [3b03abf8faaefbc437950aa59178dcca778ce856951332f14e9c159b83a9cd10] <== * I0724 22:12:59.714156 1 controller.go:606] quota admission added evaluator for: serviceaccounts * I0724 22:13:00.637968 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 
22:13:00.714747 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:13:15.472257 1 trace.go:116] Trace[371264249]: "Create" url:/api/v1/namespaces/default/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:11.165316402 +0000 UTC m=+28.885265456) (total time: 4.306895167s): * Trace[371264249]: [4.306830363s] [4.306748958s] Object stored in database * I0724 22:13:15.472429 1 trace.go:116] Trace[1157467879]: "Get" url:/api/v1/namespaces/default/serviceaccounts/default,user-agent:kubectl/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:13:11.225715914 +0000 UTC m=+28.945664968) (total time: 4.246681467s): * Trace[1157467879]: [4.246681467s] [4.246669166s] END * I0724 22:13:15.472453 1 trace.go:116] Trace[1376546597]: "GuaranteedUpdate etcd3" type:*core.Node (started: 2020-07-24 22:13:11.166090152 +0000 UTC m=+28.886039306) (total time: 4.306337531s): * Trace[1376546597]: [4.306206523s] [4.304357703s] Transaction committed * I0724 22:13:15.472507 1 trace.go:116] Trace[1373390526]: "List etcd3" key:/minions,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:13:12.562772918 +0000 UTC m=+30.282722072) (total time: 2.909710469s): * Trace[1373390526]: [2.909710469s] [2.909710469s] END * I0724 22:13:15.472573 1 trace.go:116] Trace[209798184]: "Get" url:/api/v1/namespaces/kube-system/pods/kube-apiserver-containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:10.957265426 +0000 UTC m=+28.677214580) (total time: 4.515285465s): * Trace[209798184]: [4.51489514s] [4.514888939s] About to write a response * I0724 22:13:15.472707 1 trace.go:116] Trace[484532927]: "Patch" url:/api/v1/nodes/containerd-20200724221200-14997/status,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:11.165983745 +0000 UTC m=+28.885932899) (total time: 4.306690554s): * Trace[484532927]: [4.306486741s] [4.304883637s] Object stored in database * I0724 22:13:15.472774 1 trace.go:116] Trace[667541080]: "List" url:/api/v1/nodes,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:node-controller,client:172.17.0.5 (started: 2020-07-24 22:13:12.562758918 +0000 UTC m=+30.282707972) (total time: 2.909990386s): * Trace[667541080]: [2.909773072s] [2.909764572s] Listing from storage done * I0724 22:13:18.078919 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:13:18.426181 1 controller.go:606] quota admission added evaluator for: controllerrevisions.apps * I0724 22:13:30.080930 1 trace.go:116] Trace[926841721]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:13:28.891002839 +0000 UTC m=+46.610951993) (total time: 1.189883246s): * Trace[926841721]: [1.189853344s] [1.188435753s] Transaction committed * I0724 22:14:39.085727 1 trace.go:116] Trace[943610803]: "GuaranteedUpdate etcd3" type:*apps.DaemonSet (started: 2020-07-24 22:14:37.201680483 +0000 UTC m=+114.921629637) (total time: 1.884006692s): * Trace[943610803]: [1.883921787s] [1.882993127s] Transaction committed * I0724 22:14:39.086045 1 trace.go:116] Trace[431771147]: "Update" url:/apis/apps/v1/namespaces/kube-system/daemonsets/kindnet/status,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:daemon-set-controller,client:172.17.0.5 (started: 2020-07-24 
22:14:37.201520273 +0000 UTC m=+114.921469327) (total time: 1.884488723s): * Trace[431771147]: [1.884264808s] [1.884157301s] Object stored in database * * ==> kube-controller-manager [6dc3461120ba7717ddda6d5e83f663ff1cb958b21f4b6b496ed88923606aebf1] <== * I0724 22:12:43.365003 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:12:44.072514 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:12:44.073870 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:12:44.073872 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:12:44.074524 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:12:44.074632 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:12:44.075551 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * F0724 22:12:57.450686 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: forbidden: User "system:kube-controller-manager" cannot get path "/healthz" * * ==> kube-controller-manager [6f125fe745aef65ddf16605622b6130fd0ebc50c6ee73b0d5b8431248dc821c2] <== * I0724 22:13:18.083396 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"c339b220-56ba-47ef-93ef-bb5429df2c46", APIVersion:"apps/v1", ResourceVersion:"208", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-66bff467f8 to 2 * I0724 22:13:18.088573 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-cgqcr * I0724 22:13:18.143495 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-hlk9j * I0724 22:13:18.177452 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:13:18.240929 1 shared_informer.go:230] Caches are synced for endpoint * I0724 22:13:18.273694 1 shared_informer.go:230] Caches are synced for expand * I0724 22:13:18.275914 1 shared_informer.go:230] Caches are synced for PVC protection * I0724 22:13:18.325760 1 shared_informer.go:230] Caches are synced for stateful set * I0724 22:13:18.421570 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:13:18.446520 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"a0159c58-7dba-43c7-a87d-d2e4c392a926", APIVersion:"apps/v1", ResourceVersion:"214", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-x7fwq * I0724 22:13:18.452732 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kindnet", UID:"6b1c3e80-93ca-41d3-9d48-43047d4d93a6", APIVersion:"apps/v1", ResourceVersion:"261", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-nsc8k * I0724 22:13:18.463040 1 shared_informer.go:230] Caches are synced for taint * I0724 22:13:18.463114 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:13:18.463170 1 
node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * W0724 22:13:18.463272 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:13:18.463319 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:13:18.463344 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * I0724 22:13:18.564886 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"c339b220-56ba-47ef-93ef-bb5429df2c46", APIVersion:"apps/v1", ResourceVersion:"372", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1 * I0724 22:13:18.613309 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:13:18.635502 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:13:18.635535 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:13:18.639165 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"373", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-cgqcr * I0724 22:13:18.653092 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:13:18.676387 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:13:18.677047 1 shared_informer.go:230] Caches are synced for garbage collector * * ==> kube-proxy [003518d48f7b64998ac9b35be592bc8821d93926af4451deb91a09ab8b6e907d] <== * W0724 22:13:19.661270 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:13:19.702749 1 node.go:136] Successfully retrieved node IP: 172.17.0.5 * I0724 22:13:19.702805 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [bff121f454668ac500a195fcf425cf4c1545c2b5466ceef795a4d2c11d3c6d76] <== * E0724 22:12:48.856731 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:48.943428 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:48.947394 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:12:49.021495 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:49.042794 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:49.115776 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:50.494405 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:50.855911 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:50.917390 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User 
"system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:12:51.267562 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:51.297256 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:12:51.371164 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:51.686787 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:12:51.816022 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:12:52.019881 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:54.420521 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:55.430747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:56.236464 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:56.459747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:12:56.965178 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:12:57.125721 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:12:57.141479 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace 
"kube-system" * E0724 22:12:57.352492 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:58.018671 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * I0724 22:13:03.848890 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:12:25 UTC, end at Fri 2020-07-24 22:27:43 UTC. -- * Jul 24 22:27:12 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:12.957363 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:27:21 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:21.957074 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14 * Jul 24 22:27:21 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:21.957431 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:27:24 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:24.957182 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ac782f72581f09dd5b23e949f032d10d6460499b6761e87a581036f0ed5964b4 * Jul 24 22:27:24 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:24.957408 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870392 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870459 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870483 1328 
kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870553 1328 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604200 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604256 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604272 1328 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604379 1328 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:35.957250 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14 * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:35.957282 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ac782f72581f09dd5b23e949f032d10d6460499b6761e87a581036f0ed5964b4 * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:35.957512 1328 
pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:35.957578 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998325 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998381 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998397 1328 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998452 1328 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951092 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951157 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198": failed to set 
bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951173 1328 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951224 1328 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [ac782f72581f09dd5b23e949f032d10d6460499b6761e87a581036f0ed5964b4] <== * F0724 22:24:44.328716 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/DeployApp]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j: exit status 1 (92.417767ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.5 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 8m4s default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 8m2s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set 
bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m48s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m31s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m15s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m1s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m48s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m31s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m16s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m4s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 115s (x17 over 5m51s) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j: exit status 1 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/DeployApp]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect 
containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 338511, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:12:10.40831313Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, 
"Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "3e3fc4759cbd4070ed5dddc0264a24f411b397aab6336237e6416cbda2769e84", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32900" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32899" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32898" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32897" } ] }, "SandboxKey": "/var/run/docker/netns/3e3fc4759cbd", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "1a1c80297b8f177f80b8e0fe79832f4492796767c95941e75116c272b2743f88", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:05", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "1a1c80297b8f177f80b8e0fe79832f4492796767c95941e75116c272b2743f88", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.5", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:05", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/DeployApp FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/DeployApp]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25: (1.280089373s) helpers_test.go:245: TestStartStop/group/containerd/serial/DeployApp logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * ea143e20f0dbe 2186a1a396deb 58 seconds ago Exited kindnet-cni 7 980558efef9f1 * ac782f72581f0 4689081edb103 3 minutes ago Exited storage-provisioner 7 92684e9147786 * 003518d48f7b6 3439b7546f29b 14 minutes ago Running kube-proxy 0 ca6897a07eae7 * 6f125fe745aef da26705ccb4b5 14 minutes ago Running kube-controller-manager 1 e257c0c495dd3 * 3b03abf8faaef 7e28efa976bd1 15 minutes ago Running kube-apiserver 0 c2da5c12ff9ea * 6dc3461120ba7 da26705ccb4b5 15 minutes ago Exited kube-controller-manager 0 e257c0c495dd3 * bff121f454668 76216c34ed0c7 15 minutes ago Running kube-scheduler 0 4c3ff6ff63ddc * db0fd28c536d4 303ce5db0e90d 15 minutes ago Running etcd 0 0de406b420c86 * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:12:25 UTC, end at Fri 2020-07-24 22:27:45 UTC. 
-- * Jul 24 22:26:46 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:46.246672012Z" level=info msg="StartContainer for \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\"" * Jul 24 22:26:46 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:46.260677587Z" level=info msg="shim containerd-shim started" address=/containerd-shim/c633b307d0c204b6fa9f41a779509b5d18e528ccf5677f6b7e4342c476cf33f5.sock debug=false pid=7251 * Jul 24 22:26:46 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:46.519914434Z" level=info msg="StartContainer for \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\" returns successfully" * Jul 24 22:26:55 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:55.957521429Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:26:55 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:55.957521929Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:26:57 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:57.937878775Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"01d5df04474fd2db24b4cc66aaa24be74546a453d756393223831c13b7260df4\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:26:58 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:26:58.151185533Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"32795834de6daa60922e8c7c9190d85a53d1c88f597635ffb382ffede60aa440\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.221648013Z" level=info msg="Finish piping stderr of container \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\"" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.221674015Z" level=info msg="Finish piping stdout of container \"ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14\"" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.222933503Z" level=info msg="TaskExit event &TaskExit{ContainerID:ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14,ID:ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14,Pid:7269,ExitStatus:2,ExitedAt:2020-07-24 22:27:06.222720988 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:27:06 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:06.266269337Z" level=info msg="shim reaped" id=ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14 * Jul 24 22:27:07 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:07.150357731Z" level=info msg="RemoveContainer for \"2e361d204abdd65431889780d3e7fb0bb69e31c84d86b350692ee6f2e10d6c87\"" * Jul 24 22:27:07 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:07.187292716Z" level=info msg="RemoveContainer for 
\"2e361d204abdd65431889780d3e7fb0bb69e31c84d86b350692ee6f2e10d6c87\" returns successfully" * Jul 24 22:27:09 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:09.957516843Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:27:10 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:10.957595606Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:27:12 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:12.082832788Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"b8a54fb33593acec55fab723d614ab412e0c5139ee6fdb8f67815c9f233fa747\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:12 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:12.882178169Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"7e8b1407230268c6869d4a73c2b33975e2390f2b24fc9f3cd9105f3cfdd1796b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:23 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:23.957417834Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:27:25 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:25.870161263Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:25 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:25.957540752Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:27:27 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:27.602669298Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:36 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:36.957619331Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:27:37 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:37.957508308Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:27:38 containerd-20200724221200-14997 containerd[364]: 
time="2020-07-24T22:27:38.998067020Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:39 containerd-20200724221200-14997 containerd[364]: time="2020-07-24T22:27:39.950846515Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:27:41 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:23:15 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.5 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: a912f43aab5e4ba59b29e84664ffc131 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (9 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m5s * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi 
(0%) 14m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 14m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 15m (x6 over 15m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 15m (x6 over 15m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 15m (x5 over 15m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal Starting 14m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 14m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 14m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 14m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 14m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Warning readOnlySysFS 14m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 14m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
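The failure signature that recurs through this entire post-mortem — in the containerd log, the kubelet log, and the busybox pod events — is `failed to set bridge addr: could not add IP address to "cni0": permission denied`. Every sandbox create for busybox and coredns-66bff467f8-hlk9j dies at the same point, which is why both pods sit in ContainerCreating while kindnet-cni crash-loops and storage-provisioner cannot reach the apiserver service IP. The wording is consistent with the CNI bridge plugin wrapping a netlink address add that the kernel rejected with EPERM, and a netlink RTM_NEWADDR requires CAP_NET_ADMIN, so a first diagnostic inside the node container is whether the process driving the plugin actually holds that capability. Below is a minimal stdlib-only Go sketch of such a check (the `capcheck` name is made up for illustration; the capability number and /proc layout are standard Linux, and nothing here is part of the minikube test suite):

```go
// capcheck: report whether this process's effective capability set
// includes CAP_NET_ADMIN, the capability a netlink RTM_NEWADDR
// (the operation behind "ip addr add ... dev cni0") requires.
// Illustrative sketch only; not part of the minikube test suite.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

const capNetAdmin = 12 // CAP_NET_ADMIN, from linux/capability.h

func main() {
	f, err := os.Open("/proc/self/status")
	if err != nil {
		fmt.Fprintln(os.Stderr, "capcheck:", err)
		os.Exit(1)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		if !strings.HasPrefix(line, "CapEff:") {
			continue
		}
		// CapEff is a hex bitmask of effective capabilities.
		mask, err := strconv.ParseUint(strings.TrimSpace(strings.TrimPrefix(line, "CapEff:")), 16, 64)
		if err != nil {
			fmt.Fprintln(os.Stderr, "capcheck: unparseable CapEff:", err)
			os.Exit(1)
		}
		if (mask>>capNetAdmin)&1 == 1 {
			fmt.Println("CAP_NET_ADMIN present: netlink address adds should not be refused with EPERM")
		} else {
			fmt.Println("CAP_NET_ADMIN missing: adding an address to cni0 will fail with permission denied")
		}
		return
	}
	fmt.Fprintln(os.Stderr, "capcheck: no CapEff line in /proc/self/status")
	os.Exit(1)
}
```

Run inside the node container (e.g. via `docker exec containerd-20200724221200-14997 ...`), a pass here would be consistent with the `"Privileged": true` and unconfined seccomp/apparmor settings visible in the docker inspect output above, and would point the investigation at the nested runtime layer that actually invokes the plugin rather than at the outer container's capability set.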
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [db0fd28c536d4d051abb0c3b7d4219ed9ad43fae8b4e05f52f322ed31bd12c02] <== * 2020-07-24 22:12:57.044210 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:4" took too long (129.896221ms) to execute * 2020-07-24 22:12:57.044242 W | etcdserver: read-only range request "key:\"/registry/clusterroles/system:aggregate-to-view\" " with result "range_response_count:0 size:4" took too long (228.539655ms) to execute * 2020-07-24 22:12:57.044266 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:0 size:4" took too long (178.276384ms) to execute * 2020-07-24 22:12:57.044278 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-node-lease\" " with result "range_response_count:0 size:4" took too long (227.465694ms) to execute * 2020-07-24 22:13:15.434077 W | wal: sync duration of 3.834899995s, expected less than 1s * 2020-07-24 22:13:15.471535 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:5503" took too long (4.51397778s) to execute * 2020-07-24 22:13:15.471572 W | etcdserver: request "header: txn: success:> failure:<>>" with result "size:16" took too long (3.872288817s) to execute * 2020-07-24 22:13:15.471926 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/default/default\" " with result "range_response_count:0 size:5" took too long (4.245793609s) to execute * 2020-07-24 22:13:15.471957 W | etcdserver: read-only range request "key:\"/registry/minions/\" range_end:\"/registry/minions0\" " with result "range_response_count:1 size:5367" took too long (2.908773208s) to execute * 2020-07-24 
22:13:30.080019 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (518.961173ms) to execute * 2020-07-24 22:13:31.304192 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (430.352557ms) to execute * 2020-07-24 22:13:31.304379 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (347.781631ms) to execute * 2020-07-24 22:14:39.084771 W | wal: sync duration of 1.885899215s, expected less than 1s * 2020-07-24 22:14:39.085392 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (752.73234ms) to execute * 2020-07-24 22:14:39.204221 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (526.93962ms) to execute * 2020-07-24 22:14:39.204593 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (314.821685ms) to execute * 2020-07-24 22:22:43.570684 I | mvcc: store.index: compact 568 * 2020-07-24 22:22:43.571598 I | mvcc: finished scheduled compaction at 568 (took 585.341µs) * 2020-07-24 22:26:14.677739 W | etcdserver: read-only range request "key:\"/registry/pods/default/\" range_end:\"/registry/pods/default0\" " with result "range_response_count:1 size:2036" took too long (258.17647ms) to execute * 2020-07-24 22:26:29.551172 W | etcdserver: read-only range request "key:\"/registry/pods/default/\" range_end:\"/registry/pods/default0\" " with result "range_response_count:1 size:2036" took too long (131.671395ms) to execute * 2020-07-24 22:26:43.365199 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy\" range_end:\"/registry/podsecuritypolicz\" count_only:true " with result "range_response_count:0 size:5" took too long (416.944167ms) to execute * 2020-07-24 22:26:43.365460 W | etcdserver: read-only range request "key:\"/registry/ingress\" range_end:\"/registry/ingrest\" count_only:true " with result "range_response_count:0 size:5" took too long (232.243591ms) to execute * 2020-07-24 22:27:23.173084 W | etcdserver: read-only range request "key:\"/registry/daemonsets\" range_end:\"/registry/daemonsett\" count_only:true " with result "range_response_count:0 size:7" took too long (166.661817ms) to execute * 2020-07-24 22:27:43.589170 I | mvcc: store.index: compact 670 * 2020-07-24 22:27:43.589777 I | mvcc: finished scheduled compaction at 670 (took 264.018µs) * * ==> kernel <== * 22:27:45 up 55 min, 0 users, load average: 6.79, 6.93, 7.65 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [3b03abf8faaefbc437950aa59178dcca778ce856951332f14e9c159b83a9cd10] <== * I0724 22:12:59.714156 1 controller.go:606] quota admission added evaluator for: serviceaccounts * I0724 22:13:00.637968 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 22:13:00.714747 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:13:15.472257 1 trace.go:116] Trace[371264249]: "Create" url:/api/v1/namespaces/default/events,user-agent:kubelet/v1.18.3 (linux/amd64) 
kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:11.165316402 +0000 UTC m=+28.885265456) (total time: 4.306895167s): * Trace[371264249]: [4.306830363s] [4.306748958s] Object stored in database * I0724 22:13:15.472429 1 trace.go:116] Trace[1157467879]: "Get" url:/api/v1/namespaces/default/serviceaccounts/default,user-agent:kubectl/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:13:11.225715914 +0000 UTC m=+28.945664968) (total time: 4.246681467s): * Trace[1157467879]: [4.246681467s] [4.246669166s] END * I0724 22:13:15.472453 1 trace.go:116] Trace[1376546597]: "GuaranteedUpdate etcd3" type:*core.Node (started: 2020-07-24 22:13:11.166090152 +0000 UTC m=+28.886039306) (total time: 4.306337531s): * Trace[1376546597]: [4.306206523s] [4.304357703s] Transaction committed * I0724 22:13:15.472507 1 trace.go:116] Trace[1373390526]: "List etcd3" key:/minions,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:13:12.562772918 +0000 UTC m=+30.282722072) (total time: 2.909710469s): * Trace[1373390526]: [2.909710469s] [2.909710469s] END * I0724 22:13:15.472573 1 trace.go:116] Trace[209798184]: "Get" url:/api/v1/namespaces/kube-system/pods/kube-apiserver-containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:10.957265426 +0000 UTC m=+28.677214580) (total time: 4.515285465s): * Trace[209798184]: [4.51489514s] [4.514888939s] About to write a response * I0724 22:13:15.472707 1 trace.go:116] Trace[484532927]: "Patch" url:/api/v1/nodes/containerd-20200724221200-14997/status,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.5 (started: 2020-07-24 22:13:11.165983745 +0000 UTC m=+28.885932899) (total time: 4.306690554s): * Trace[484532927]: [4.306486741s] [4.304883637s] Object stored in database * I0724 22:13:15.472774 1 trace.go:116] Trace[667541080]: "List" url:/api/v1/nodes,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:node-controller,client:172.17.0.5 (started: 2020-07-24 22:13:12.562758918 +0000 UTC m=+30.282707972) (total time: 2.909990386s): * Trace[667541080]: [2.909773072s] [2.909764572s] Listing from storage done * I0724 22:13:18.078919 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:13:18.426181 1 controller.go:606] quota admission added evaluator for: controllerrevisions.apps * I0724 22:13:30.080930 1 trace.go:116] Trace[926841721]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:13:28.891002839 +0000 UTC m=+46.610951993) (total time: 1.189883246s): * Trace[926841721]: [1.189853344s] [1.188435753s] Transaction committed * I0724 22:14:39.085727 1 trace.go:116] Trace[943610803]: "GuaranteedUpdate etcd3" type:*apps.DaemonSet (started: 2020-07-24 22:14:37.201680483 +0000 UTC m=+114.921629637) (total time: 1.884006692s): * Trace[943610803]: [1.883921787s] [1.882993127s] Transaction committed * I0724 22:14:39.086045 1 trace.go:116] Trace[431771147]: "Update" url:/apis/apps/v1/namespaces/kube-system/daemonsets/kindnet/status,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:daemon-set-controller,client:172.17.0.5 (started: 2020-07-24 22:14:37.201520273 +0000 UTC m=+114.921469327) (total time: 1.884488723s): * Trace[431771147]: [1.884264808s] [1.884157301s] Object stored in database * * ==> kube-controller-manager 
[6dc3461120ba7717ddda6d5e83f663ff1cb958b21f4b6b496ed88923606aebf1] <== * I0724 22:12:43.365003 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:12:44.072514 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:12:44.073870 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:12:44.073872 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:12:44.074524 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:12:44.074632 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:12:44.075551 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * F0724 22:12:57.450686 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: forbidden: User "system:kube-controller-manager" cannot get path "/healthz" * * ==> kube-controller-manager [6f125fe745aef65ddf16605622b6130fd0ebc50c6ee73b0d5b8431248dc821c2] <== * I0724 22:13:18.083396 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"c339b220-56ba-47ef-93ef-bb5429df2c46", APIVersion:"apps/v1", ResourceVersion:"208", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-66bff467f8 to 2 * I0724 22:13:18.088573 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-cgqcr * I0724 22:13:18.143495 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"325", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-hlk9j * I0724 22:13:18.177452 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:13:18.240929 1 shared_informer.go:230] Caches are synced for endpoint * I0724 22:13:18.273694 1 shared_informer.go:230] Caches are synced for expand * I0724 22:13:18.275914 1 shared_informer.go:230] Caches are synced for PVC protection * I0724 22:13:18.325760 1 shared_informer.go:230] Caches are synced for stateful set * I0724 22:13:18.421570 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:13:18.446520 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"a0159c58-7dba-43c7-a87d-d2e4c392a926", APIVersion:"apps/v1", ResourceVersion:"214", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-x7fwq * I0724 22:13:18.452732 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kindnet", UID:"6b1c3e80-93ca-41d3-9d48-43047d4d93a6", APIVersion:"apps/v1", ResourceVersion:"261", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-nsc8k * I0724 22:13:18.463040 1 shared_informer.go:230] Caches are synced for taint * I0724 22:13:18.463114 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:13:18.463170 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * W0724 22:13:18.463272 1 node_lifecycle_controller.go:1048] Missing timestamp for Node 
containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:13:18.463319 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:13:18.463344 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * I0724 22:13:18.564886 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"c339b220-56ba-47ef-93ef-bb5429df2c46", APIVersion:"apps/v1", ResourceVersion:"372", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1 * I0724 22:13:18.613309 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:13:18.635502 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:13:18.635535 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:13:18.639165 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"1f68eda2-5dbc-4ff7-802d-d0fc15a0855d", APIVersion:"apps/v1", ResourceVersion:"373", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-cgqcr * I0724 22:13:18.653092 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:13:18.676387 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:13:18.677047 1 shared_informer.go:230] Caches are synced for garbage collector * * ==> kube-proxy [003518d48f7b64998ac9b35be592bc8821d93926af4451deb91a09ab8b6e907d] <== * W0724 22:13:19.661270 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:13:19.702749 1 node.go:136] Successfully retrieved node IP: 172.17.0.5 * I0724 22:13:19.702805 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [bff121f454668ac500a195fcf425cf4c1545c2b5466ceef795a4d2c11d3c6d76] <== * E0724 22:12:48.856731 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:48.943428 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:48.947394 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:12:49.021495 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:49.042794 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:49.115776 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:50.494405 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:50.855911 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:50.917390 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User 
"system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:12:51.267562 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:51.297256 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:12:51.371164 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:51.686787 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:12:51.816022 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:12:52.019881 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:54.420521 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:12:55.430747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:12:56.236464 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:12:56.459747 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:12:56.965178 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:12:57.125721 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:12:57.141479 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace 
"kube-system" * E0724 22:12:57.352492 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:12:58.018671 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * I0724 22:13:03.848890 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:12:25 UTC, end at Fri 2020-07-24 22:27:45 UTC. -- * Jul 24 22:27:12 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:12.957363 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:27:21 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:21.957074 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14 * Jul 24 22:27:21 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:21.957431 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:27:24 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:24.957182 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ac782f72581f09dd5b23e949f032d10d6460499b6761e87a581036f0ed5964b4 * Jul 24 22:27:24 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:24.957408 1328 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870392 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870459 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870483 1328 
kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:25 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:25.870553 1328 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"65256ac1b854ef1aad2f5e5faba981e3ba3e2efede014e0aba9abbb838cf9e26\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604200 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604256 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604272 1328 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:27 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:27.604379 1328 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"742f36453fef1543a625c726a7faf9c2db36f264d359adf8b4b2e5c13f8e44ae\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:35.957250 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ea143e20f0dbeeb2b9358673ae2e4d2a16b693a820fae1864ff422753a21ab14 * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: I0724 22:27:35.957282 1328 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: ac782f72581f09dd5b23e949f032d10d6460499b6761e87a581036f0ed5964b4 * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:35.957512 1328 
pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:27:35 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:35.957578 1328 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998325 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998381 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998397 1328 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:38 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:38.998452 1328 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"2162692046689cfc16590d2369d492dd13ed331c0f624343f44091944438584d\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951092 1328 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951157 1328 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198": failed to set 
bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951173 1328 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:27:39 containerd-20200724221200-14997 kubelet[1328]: E0724 22:27:39.951224 1328 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"1562b5a48d11e357ee96997306c5ad1118e140b04c14b88f1d9fcb66ca41f198\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [ac782f72581f09dd5b23e949f032d10d6460499b6761e87a581036f0ed5964b4] <== * F0724 22:24:44.328716 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/DeployApp]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j: exit status 1 (96.17023ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.5 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 8m6s default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 8m4s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set 
bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m50s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m33s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m17s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 7m3s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m50s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m33s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m18s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 6m6s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 117s (x17 over 5m53s) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j: exit status 1 === RUN TestStartStop/group/containerd/serial/Stop start_stop_delete_test.go:164: (dbg) Run: ./minikube-linux-amd64 stop -p containerd-20200724221200-14997 --alsologtostderr -v=3 start_stop_delete_test.go:164: (dbg) Done: ./minikube-linux-amd64 stop -p 
containerd-20200724221200-14997 --alsologtostderr -v=3: (1.448999893s) === RUN TestStartStop/group/containerd/serial/EnableAddonAfterStop start_stop_delete_test.go:174: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 start_stop_delete_test.go:174: (dbg) Non-zero exit: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997: exit status 7 (117.700336ms) -- stdout -- Stopped -- /stdout -- start_stop_delete_test.go:174: status error: exit status 7 (may be ok) start_stop_delete_test.go:181: (dbg) Run: ./minikube-linux-amd64 addons enable dashboard -p containerd-20200724221200-14997 === RUN TestStartStop/group/containerd/serial/SecondStart start_stop_delete_test.go:190: (dbg) Run: ./minikube-linux-amd64 start -p containerd-20200724221200-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --apiserver-port=8444 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3 === CONT TestNetworkPlugins/group/calico/Start net_test.go:80: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p calico-20200724220226-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=calico --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 70 (28m23.881697508s) -- stdout -- * [calico-20200724220226-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome * Using the docker driver based on user configuration * Starting control plane node calico-20200724220226-14997 in cluster calico-20200724220226-14997 * Pulling base image ... * Creating docker container (CPUs=2, Memory=1800MB) ... * Preparing Kubernetes v1.18.3 on Docker 19.03.2 ... * Configuring Calico (Container Networking Interface) ... * Verifying Kubernetes components... * Enabled addons: default-storageclass, storage-provisioner -- /stdout -- ** stderr ** I0724 22:02:26.961183 119525 out.go:188] Setting JSON to false I0724 22:02:26.964981 119525 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":1785,"bootTime":1595626361,"procs":909,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 22:02:26.966103 119525 start.go:111] virtualization: kvm host I0724 22:02:26.996954 119525 notify.go:125] Checking for updates... I0724 22:02:27.015890 119525 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:02:27.080041 119525 docker.go:87] docker version: linux-19.03.8 I0724 22:02:27.115243 119525 start.go:217] selected driver: docker I0724 22:02:27.115263 119525 start.go:623] validating driver "docker" against I0724 22:02:27.115292 119525 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 22:02:27.115409 119525 cli_runner.go:109] Run: docker system info --format "{{json .}}" ! Requested memory allocation (1800MB) is less than the recommended minimum 2000MB. Kubernetes may crash unexpectedly. I0724 22:02:27.181730 119525 start_flags.go:223] no existing cluster config was found, will generate one from the flags ! 
Requested memory allocation (1800MB) is less than the recommended minimum 2000MB. Kubernetes may crash unexpectedly. I0724 22:02:27.182041 119525 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 22:02:27.182077 119525 cni.go:74] Creating CNI manager for "calico" I0724 22:02:27.182135 119525 start_flags.go:340] Found "Calico" CNI - setting NetworkPlugin=cni I0724 22:02:27.182152 119525 start_flags.go:345] config: {Name:calico-20200724220226-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:calico-20200724220226-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:02:27.292277 119525 cache.go:117] Beginning downloading kic base image for docker with docker I0724 22:02:27.308808 119525 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:02:27.308851 119525 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:02:27.308865 119525 cache.go:51] Caching tarball of preloaded images I0724 22:02:27.308876 119525 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download I0724 22:02:27.308880 119525 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on docker I0724 22:02:27.308956 119525 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 22:02:27.308986 119525 image.go:140] Writing local/kicbase:-snapshot to local daemon I0724 22:02:27.309136 119525 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/config.json ... 
I0724 22:02:27.309219 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/config.json: {Name:mk316db4dc1bcba6aa966077bc6f9cab53f53953 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:02:27.651783 119525 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found I0724 22:02:27.651862 119525 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:02:27.651877 119525 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:02:32.122327 119525 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 ! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image I0724 22:02:32.122393 119525 cache.go:178] Successfully downloaded all kic artifacts I0724 22:02:32.122428 119525 start.go:241] acquiring machines lock for calico-20200724220226-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 22:03:29.608987 119525 start.go:245] acquired machines lock for "calico-20200724220226-14997" in 57.486531693s I0724 22:03:29.609047 119525 start.go:85] Provisioning new machine with config: &{Name:calico-20200724220226-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:calico-20200724220226-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true} I0724 22:03:29.609182 119525 start.go:122] createHost starting for "" (driver="docker") I0724 22:03:29.620634 119525 start.go:158] libmachine.API.Create for "calico-20200724220226-14997" (driver="docker") I0724 22:03:29.620688 119525 client.go:161] LocalClient.Create starting I0724 22:03:29.620736 119525 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 22:03:29.620779 119525 main.go:115] libmachine: Decoding PEM data... 
I0724 22:03:29.620801 119525 main.go:115] libmachine: Parsing certificate... I0724 22:03:29.620948 119525 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 22:03:29.620976 119525 main.go:115] libmachine: Decoding PEM data... I0724 22:03:29.621001 119525 main.go:115] libmachine: Parsing certificate... I0724 22:03:29.621438 119525 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 22:03:29.709372 119525 cli_runner.go:109] Run: docker volume create calico-20200724220226-14997 --label name.minikube.sigs.k8s.io=calico-20200724220226-14997 --label created_by.minikube.sigs.k8s.io=true I0724 22:03:29.787368 119525 oci.go:101] Successfully created a docker volume calico-20200724220226-14997 I0724 22:03:29.787475 119525 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v calico-20200724220226-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib I0724 22:03:31.193594 119525 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/test -v calico-20200724220226-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib: (1.406075302s) I0724 22:03:31.193621 119525 oci.go:105] Successfully prepared a docker volume calico-20200724220226-14997 I0724 22:03:31.193733 119525 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:03:31.193783 119525 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:03:31.193794 119525 kic.go:133] Starting extracting preloaded images to volume ... I0724 22:03:31.193847 119525 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v calico-20200724220226-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir W0724 22:03:31.194216 119525 oci.go:165] Your kernel does not support swap limit capabilities or the cgroup is not mounted. 
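The "extracting preloaded images to volume" step above is just a tar invocation inside a throwaway container, as the docker run command in the log shows. A minimal sketch reproducing it with os/exec; the tarball path is a placeholder, while the image ref, volume name, and tar flags are copied from the log:

    package main

    import (
    	"log"
    	"os/exec"
    )

    // Re-run the extraction by hand: mount the lz4 preload tarball
    // read-only, mount the cluster volume at /extractDir, and let tar
    // inside the base image unpack it.
    func main() {
    	cmd := exec.Command("docker", "run", "--rm",
    		"--entrypoint", "/usr/bin/tar",
    		"-v", "/path/to/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro",
    		"-v", "calico-20200724220226-14997:/extractDir",
    		"kicbase/stable:v0.0.10",
    		"-I", "lz4", "-xvf", "/preloaded.tar", "-C", "/extractDir")
    	if out, err := cmd.CombinedOutput(); err != nil {
    		log.Fatalf("extract failed: %v\n%s", err, out)
    	}
    }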
I0724 22:03:31.194399 119525 cli_runner.go:109] Run: docker info --format "'{{json .SecurityOptions}}'" I0724 22:03:31.260223 119525 cli_runner.go:109] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname calico-20200724220226-14997 --name calico-20200724220226-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=calico-20200724220226-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=calico-20200724220226-14997 --volume calico-20200724220226-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=1800mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 I0724 22:03:32.631271 119525 cli_runner.go:151] Completed: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname calico-20200724220226-14997 --name calico-20200724220226-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=calico-20200724220226-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=calico-20200724220226-14997 --volume calico-20200724220226-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=1800mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438: (1.370976606s) I0724 22:03:32.631372 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Running}} I0724 22:03:32.695461 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Status}} I0724 22:03:32.768054 119525 cli_runner.go:109] Run: docker exec calico-20200724220226-14997 stat /var/lib/dpkg/alternatives/iptables I0724 22:03:32.956452 119525 oci.go:222] the created container "calico-20200724220226-14997" has a running status. I0724 22:03:32.956487 119525 kic.go:157] Creating ssh key for kic: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa... 
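The docker run flags above publish 127.0.0.1::22 (and ::8443, ::2376, ::5000) with no fixed host port, so Docker assigns ephemeral ones; the provisioner recovers the mapping with the inspect template that appears below (SSH lands on 32856 in this run). A small sketch of that lookup, reusing the same template and container name:

    package main

    import (
    	"fmt"
    	"log"
    	"os/exec"
    	"strings"
    )

    // Resolve which ephemeral host port Docker picked for the node's SSH.
    func main() {
    	out, err := exec.Command("docker", "container", "inspect",
    		"-f", `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`,
    		"calico-20200724220226-14997").Output()
    	if err != nil {
    		log.Fatalf("inspect failed: %v", err)
    	}
    	fmt.Println("ssh host port:", strings.TrimSpace(string(out)))
    }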
I0724 22:03:33.157785 119525 kic_runner.go:179] docker (temp): /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0724 22:03:33.307809 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Status}}
I0724 22:03:33.380563 119525 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0724 22:03:33.380592 119525 kic_runner.go:114] Args: [docker exec --privileged calico-20200724220226-14997 chown docker:docker /home/docker/.ssh/authorized_keys]
I0724 22:03:39.405069 119525 kic_runner.go:123] Done: [docker exec --privileged calico-20200724220226-14997 chown docker:docker /home/docker/.ssh/authorized_keys]: (6.024448239s)
I0724 22:03:43.701875 119525 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v calico-20200724220226-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir: (12.507981318s)
I0724 22:03:43.701906 119525 kic.go:138] duration metric: took 12.508110 seconds to extract preloaded images to volume
I0724 22:03:43.702013 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Status}}
I0724 22:03:43.757116 119525 machine.go:88] provisioning docker machine ...
I0724 22:03:43.757159 119525 ubuntu.go:166] provisioning hostname "calico-20200724220226-14997"
I0724 22:03:43.757218 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:43.809681 119525 main.go:115] libmachine: Using SSH client type: native
I0724 22:03:43.809863 119525 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32856 <nil> <nil>}
I0724 22:03:43.809883 119525 main.go:115] libmachine: About to run SSH command:
sudo hostname calico-20200724220226-14997 && echo "calico-20200724220226-14997" | sudo tee /etc/hostname
I0724 22:03:43.951386 119525 main.go:115] libmachine: SSH cmd err, output: <nil>: calico-20200724220226-14997
I0724 22:03:43.951537 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:44.010949 119525 main.go:115] libmachine: Using SSH client type: native
I0724 22:03:44.011140 119525 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32856 <nil> <nil>}
I0724 22:03:44.011182 119525 main.go:115] libmachine: About to run SSH command:

		if ! grep -xq '.*\scalico-20200724220226-14997' /etc/hosts; then
			if grep -xq '127.0.1.1\s.*' /etc/hosts; then
				sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 calico-20200724220226-14997/g' /etc/hosts;
			else
				echo '127.0.1.1 calico-20200724220226-14997' | sudo tee -a /etc/hosts;
			fi
		fi

I0724 22:03:44.133271 119525 main.go:115] libmachine: SSH cmd err, output: <nil>:
I0724 22:03:44.133302 119525 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube}
I0724 22:03:44.133329 119525 ubuntu.go:174] setting up certificates
I0724 22:03:44.133338 119525 provision.go:82] configureAuth start
I0724 22:03:44.133395 119525 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" calico-20200724220226-14997
I0724 22:03:44.189154 119525 provision.go:131] copyHostCerts
I0724 22:03:44.189235 119525 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ...
I0724 22:03:44.189292 119525 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes)
I0724 22:03:44.189369 119525 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ...
I0724 22:03:44.189399 119525 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes)
I0724 22:03:44.189456 119525 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ...
I0724 22:03:44.189484 119525 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes)
I0724 22:03:44.189526 119525 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.calico-20200724220226-14997 san=[172.17.0.7 localhost 127.0.0.1]
I0724 22:03:44.804376 119525 provision.go:159] copyRemoteCerts
I0724 22:03:44.804455 119525 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0724 22:03:44.804519 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:44.876074 119525 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32856 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa Username:docker}
I0724 22:03:44.975105 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes)
I0724 22:03:45.002745 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1147 bytes)
I0724 22:03:45.024097 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0724 22:03:45.052645 119525 provision.go:85] duration metric: configureAuth took 919.29151ms
I0724 22:03:45.052667 119525 ubuntu.go:190] setting minikube options for container-runtime
I0724 22:03:45.052848 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:45.112610 119525 main.go:115] libmachine: Using SSH client type: native
I0724 22:03:45.112803 119525 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32856 <nil> <nil>}
I0724 22:03:45.112821 119525 main.go:115] libmachine: About to run SSH command: df --output=fstype / | tail -n 1
I0724 22:03:45.245690 119525 main.go:115] libmachine: SSH cmd err, output: <nil>: overlay
I0724 22:03:45.245712 119525 ubuntu.go:71] root file system type: overlay
I0724 22:03:45.246013 119525 provision.go:290] Updating docker unit: /lib/systemd/system/docker.service ...
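The sshutil lines above connect to the container's forwarded SSH port (127.0.0.1:32856) with the generated key, then run commands like `df --output=fstype /` over that session. A sketch of such a connection, assuming the golang.org/x/crypto/ssh dependency; the key path is elided and the helper is illustrative, not minikube's sshutil:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// newSSHClient dials the container's forwarded SSH port with key auth,
// like the "new ssh client: &{IP:127.0.0.1 Port:32856 ...}" entries above.
func newSSHClient(keyPath, addr, user string) (*ssh.Client, error) {
	key, err := ioutil.ReadFile(keyPath)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		return nil, err
	}
	cfg := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // local test VM; no known_hosts
	}
	return ssh.Dial("tcp", addr, cfg)
}

func main() {
	// Key path elided; the full path appears in the log above.
	client, err := newSSHClient("/path/to/machines/calico-.../id_rsa",
		"127.0.0.1:32856", "docker")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()
	out, err := sess.CombinedOutput("df --output=fstype / | tail -n 1")
	log.Printf("root fs: %s err=%v", out, err)
}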
I0724 22:03:45.246245 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:45.307317 119525 main.go:115] libmachine: Using SSH client type: native
I0724 22:03:45.307501 119525 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32856 <nil> <nil>}
I0724 22:03:45.307594 119525 main.go:115] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify

# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.

# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0724 22:03:45.455134 119525 main.go:115] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify

# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.

# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target

I0724 22:03:45.455281 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:45.521671 119525 main.go:115] libmachine: Using SSH client type: native
I0724 22:03:45.521895 119525 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32856 <nil> <nil>}
I0724 22:03:45.521936 119525 main.go:115] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0724 22:03:48.823031 119525 main.go:115] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service	2019-08-29 04:42:14.000000000 +0000
+++ /lib/systemd/system/docker.service.new	2020-07-24 22:03:45.448749797 +0000
@@ -8,24 +8,22 @@
 
 [Service]
 Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutSec=0
-RestartSec=2
-Restart=always
-
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
 
 # Having non-zero Limit*s causes performance problems due to accounting overhead
 # in the kernel. We recommend using cgroups to do container-local accounting.
@@ -33,9 +31,10 @@
 LimitNPROC=infinity
 LimitCORE=infinity
 
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
 TasksMax=infinity
+TimeoutStartSec=0
 
 # set delegate yes so that systemd does not reset the cgroups of docker containers
 Delegate=yes

I0724 22:03:48.823061 119525 machine.go:91] provisioned docker machine in 5.06591707s
I0724 22:03:48.823070 119525 client.go:164] LocalClient.Create took 19.202371146s
I0724 22:03:48.823088 119525 start.go:163] duration metric: libmachine.API.Create for "calico-20200724220226-14997" took 19.202460352s
I0724 22:03:48.823096 119525 start.go:204] post-start starting for "calico-20200724220226-14997" (driver="docker")
I0724 22:03:48.823101 119525 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0724 22:03:48.823164 119525 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0724 22:03:48.823210 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:48.879975 119525 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32856 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa Username:docker}
I0724 22:03:48.977895 119525 ssh_runner.go:148] Run: cat /etc/os-release
I0724 22:03:48.981529 119525 main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0724 22:03:48.981574 119525 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0724 22:03:48.981587 119525 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0724 22:03:48.981600 119525 info.go:98] Remote host: Ubuntu 19.10
I0724 22:03:48.981609 119525 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets ...
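The docker.service content above is rendered host-side and only installed when `sudo diff -u` reports a change (the `|| { mv ...; systemctl restart docker; }` trick). A sketch of rendering such a unit with text/template; the template is abbreviated and the struct field is illustrative, not minikube's actual code:

package main

import (
	"os"
	"text/template"
)

// dockerUnit holds values substituted into the unit template.
// The field name is hypothetical, not minikube's struct.
type dockerUnit struct {
	ExtraArgs string
}

const unitTmpl = `[Unit]
Description=Docker Application Container Engine
Requires=docker.socket

[Service]
Type=notify
# Clear the inherited ExecStart, then set ours; systemd rejects a second
# ExecStart= line for non-oneshot services otherwise.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock {{.ExtraArgs}}
ExecReload=/bin/kill -s HUP $MAINPID

[Install]
WantedBy=multi-user.target
`

func main() {
	t := template.Must(template.New("docker.service").Parse(unitTmpl))
	// Render to stdout; minikube instead pipes the result over SSH into
	// /lib/systemd/system/docker.service.new via sudo tee.
	if err := t.Execute(os.Stdout, dockerUnit{ExtraArgs: "--insecure-registry 10.96.0.0/12"}); err != nil {
		panic(err)
	}
}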
I0724 22:03:48.981669 119525 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ...
I0724 22:03:48.981809 119525 filesync.go:141] local asset: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts -> hosts in /etc/test/nested/copy/14997
I0724 22:03:48.981858 119525 ssh_runner.go:148] Run: sudo mkdir -p /etc/test/nested/copy/14997
I0724 22:03:48.994603 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts --> /etc/test/nested/copy/14997/hosts (40 bytes)
I0724 22:03:49.018709 119525 start.go:207] post-start completed in 195.601791ms
I0724 22:03:49.020659 119525 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" calico-20200724220226-14997
I0724 22:03:49.087119 119525 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/config.json ...
I0724 22:03:49.087313 119525 start.go:125] duration metric: createHost completed in 19.478113905s
I0724 22:03:49.087337 119525 start.go:76] releasing machines lock for "calico-20200724220226-14997", held for 19.478317518s
I0724 22:03:49.087436 119525 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" calico-20200724220226-14997
I0724 22:03:49.144481 119525 ssh_runner.go:148] Run: systemctl --version
I0724 22:03:49.144554 119525 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/
I0724 22:03:49.144555 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:49.144646 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:03:49.203244 119525 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32856 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa Username:docker}
I0724 22:03:49.206534 119525 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32856 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa Username:docker}
I0724 22:03:49.458317 119525 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd
I0724 22:03:49.471586 119525 ssh_runner.go:148] Run: sudo systemctl cat docker.service
I0724 22:03:49.483974 119525 cruntime.go:192] skipping containerd shutdown because we are bound to it
I0724 22:03:49.484053 119525 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service crio
I0724 22:03:49.496439 119525 ssh_runner.go:148] Run: sudo systemctl cat docker.service
I0724 22:03:49.508843 119525 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:03:49.592084 119525 ssh_runner.go:148] Run: sudo systemctl start docker
I0724 22:03:49.605217 119525 ssh_runner.go:148] Run: docker version --format {{.Server.Version}}
I0724 22:03:49.690835 119525 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}}
I0724 22:03:49.743189 119525 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 0).Gateway}}" d4a420189740
I0724 22:03:49.800449 119525 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1
I0724 22:03:49.800586 119525 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts
I0724 22:03:49.805984 119525 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:03:49.819937 119525 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker
I0724 22:03:49.819969 119525 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4
I0724 22:03:49.820022 119525 ssh_runner.go:148] Run: docker images --format {{.Repository}}:{{.Tag}}
I0724 22:03:49.886760 119525 docker.go:381] Got preloaded images: -- stdout --
kubernetesui/dashboard:v2.0.1
k8s.gcr.io/kube-proxy:v1.18.3
k8s.gcr.io/kube-apiserver:v1.18.3
k8s.gcr.io/kube-controller-manager:v1.18.3
k8s.gcr.io/kube-scheduler:v1.18.3
kubernetesui/metrics-scraper:v1.0.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/coredns:1.6.7
k8s.gcr.io/etcd:3.4.3-0
gcr.io/k8s-minikube/storage-provisioner:v1.8.1

-- /stdout --
I0724 22:03:49.886793 119525 docker.go:319] Images already preloaded, skipping extraction
I0724 22:03:49.886841 119525 ssh_runner.go:148] Run: docker images --format {{.Repository}}:{{.Tag}}
I0724 22:03:49.943281 119525 docker.go:381] Got preloaded images: -- stdout --
kubernetesui/dashboard:v2.0.1
k8s.gcr.io/kube-proxy:v1.18.3
k8s.gcr.io/kube-controller-manager:v1.18.3
k8s.gcr.io/kube-scheduler:v1.18.3
k8s.gcr.io/kube-apiserver:v1.18.3
kubernetesui/metrics-scraper:v1.0.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/coredns:1.6.7
k8s.gcr.io/etcd:3.4.3-0
gcr.io/k8s-minikube/storage-provisioner:v1.8.1

-- /stdout --
I0724 22:03:49.943311 119525 cache_images.go:69] Images are preloaded, skipping loading
I0724 22:03:49.943382 119525 ssh_runner.go:148] Run: docker info --format {{.CgroupDriver}}
I0724 22:03:50.015894 119525 cni.go:74] Creating CNI manager for "calico"
I0724 22:03:50.015917 119525 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0724 22:03:50.015935 119525 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.7 APIServerPort:8443 KubernetesVersion:v1.18.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:calico-20200724220226-14997 NodeName:calico-20200724220226-14997 DNSDomain:cluster.local CRISocket: ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.7"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.7 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0724 22:03:50.016066 119525 kubeadm.go:154] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.17.0.7
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "calico-20200724220226-14997"
  kubeletExtraArgs:
    node-ip: 172.17.0.7
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "172.17.0.7"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
controllerManager:
  extraArgs:
    "leader-elect": "false"
scheduler:
  extraArgs:
    "leader-elect": "false"
kubernetesVersion: v1.18.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 172.17.0.7:10249
I0724 22:03:50.016133 119525 kubeadm.go:790] kubelet [Unit]
Wants=docker.socket

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=calico-20200724220226-14997 --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.7

[Install]
 config:
{KubernetesVersion:v1.18.3 ClusterName:calico-20200724220226-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:}
I0724 22:03:50.016192 119525 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.18.3
I0724 22:03:50.027051 119525 binaries.go:43] Found k8s binaries, skipping transfer
I0724 22:03:50.027184 119525 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0724 22:03:50.036545 119525 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (372 bytes)
I0724 22:03:50.066097 119525 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes)
I0724 22:03:50.104958 119525 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1775 bytes)
I0724 22:03:50.130931 119525 ssh_runner.go:148] Run: grep 172.17.0.7 control-plane.minikube.internal$ /etc/hosts
I0724 22:03:50.134867 119525 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.7 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:03:50.147453 119525 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:03:50.228008 119525 ssh_runner.go:148] Run: sudo systemctl start kubelet
I0724 22:03:50.254426 119525 certs.go:52] Setting up /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997 for IP: 172.17.0.7
I0724 22:03:50.254488 119525 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key
I0724 22:03:50.254507 119525 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key
I0724 22:03:50.254567 119525 certs.go:273] generating minikube-user signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/client.key
I0724 22:03:50.254582 119525 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/client.crt with IP's: []
I0724 22:03:50.319621 119525 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/client.crt ...
I0724 22:03:50.319658 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/client.crt: {Name:mk1c6ccc83fdf8041def0b5001de172883aac596 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:03:50.319857 119525 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/client.key ...
I0724 22:03:50.319875 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/client.key: {Name:mkb3b8b057699ebed9bffeb314ef3e6032a658ac Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:03:50.319996 119525 certs.go:273] generating minikube signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.key.d9a465bc
I0724 22:03:50.320012 119525 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.crt.d9a465bc with IP's: [172.17.0.7 10.96.0.1 127.0.0.1 10.0.0.1]
I0724 22:03:50.620076 119525 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.crt.d9a465bc ...
I0724 22:03:50.620109 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.crt.d9a465bc: {Name:mk91e479b81d044d7f09c24befdc5595d827bd94 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:03:50.620318 119525 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.key.d9a465bc ...
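The crypto.go entries above generate a CA-signed apiserver certificate whose IP SANs are exactly the list in the log ([172.17.0.7 10.96.0.1 127.0.0.1 10.0.0.1]). A sketch of the same idea with the standard crypto/x509 package; error handling is elided for brevity and this is not minikube's actual crypto.go:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Self-signed CA (errors discarded for brevity in this sketch).
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Server certificate with the IP SANs seen in the log line above.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(1, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("172.17.0.7"), net.ParseIP("10.96.0.1"),
			net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
		},
	}
	leafDER, _ := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: leafDER})
}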
I0724 22:03:50.620380 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.key.d9a465bc: {Name:mkbf14daaefde56a08bf3ed6bce771d835967c3e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:03:50.620488 119525 certs.go:284] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.crt.d9a465bc -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.crt
I0724 22:03:50.620558 119525 certs.go:288] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.key.d9a465bc -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.key
I0724 22:03:50.620667 119525 certs.go:273] generating aggregator signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.key
I0724 22:03:50.620699 119525 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.crt with IP's: []
I0724 22:03:50.735250 119525 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.crt ...
I0724 22:03:50.735277 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.crt: {Name:mk94ba374fcf8f7ae79a34b976b941312dc77808 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:03:50.735459 119525 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.key ...
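Each "WriteFile acquiring" entry above guards a write with a lock configured as Delay:500ms Timeout:1m0s. A sketch of that acquire-with-retry idea using a plain O_EXCL lockfile; minikube's lock.go differs in detail, so the helper name here is illustrative:

package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"time"
)

// writeFileLocked takes an exclusive lockfile with retries, then writes,
// mirroring the {Delay:500ms Timeout:1m0s} parameters in the log.
func writeFileLocked(path string, data []byte, delay, timeout time.Duration) error {
	lock := path + ".lock"
	deadline := time.Now().Add(timeout)
	for {
		// O_EXCL makes creation fail if another process holds the lockfile.
		f, err := os.OpenFile(lock, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
		if err == nil {
			f.Close()
			break
		}
		if time.Now().After(deadline) {
			return errors.New("timed out acquiring " + lock)
		}
		time.Sleep(delay)
	}
	defer os.Remove(lock)
	return ioutil.WriteFile(path, data, 0600)
}

func main() {
	if err := writeFileLocked("/tmp/demo.key", []byte("secret"),
		500*time.Millisecond, time.Minute); err != nil {
		fmt.Println(err)
	}
}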
I0724 22:03:50.735476 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.key: {Name:mk6d7fc22ff9ab76ebad895529a5a96f83e2b0ff Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:03:50.735676 119525 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes)
W0724 22:03:50.735728 119525 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes
I0724 22:03:50.735751 119525 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes)
I0724 22:03:50.735780 119525 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes)
I0724 22:03:50.735810 119525 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes)
I0724 22:03:50.735837 119525 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes)
I0724 22:03:50.736863 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes)
I0724 22:03:50.759885 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0724 22:03:50.781205 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes)
I0724 22:03:50.801827 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/calico-20200724220226-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0724 22:03:50.821895 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes)
I0724 22:03:50.842698 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0724 22:03:50.863764 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes)
I0724 22:03:50.885412 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0724 22:03:50.906314 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes)
I0724 22:03:50.927041 119525 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes)
I0724 22:03:50.948365 119525 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes)
I0724 22:03:50.970415 119525 ssh_runner.go:148] Run: openssl version
I0724 22:03:50.976501 119525 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem"
I0724 22:03:50.985722 119525 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem
I0724 22:03:50.989577 119525 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem
I0724 22:03:50.989629 119525 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem
I0724 22:03:50.995542 119525 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0"
I0724 22:03:51.004893 119525 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0724 22:03:51.014602 119525 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0724 22:03:51.018506 119525 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem
I0724 22:03:51.018561 119525 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0724 22:03:51.026098 119525 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0724 22:03:51.036201 119525 kubeadm.go:327] StartCluster: {Name:calico-20200724220226-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:calico-20200724220226-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.7 Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]}
I0724 22:03:51.036325 119525 ssh_runner.go:148] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0724 22:03:51.091401 119525 ssh_runner.go:148] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0724 22:03:51.101348 119525 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0724 22:03:51.115071 119525 kubeadm.go:211] ignoring SystemVerification for kubeadm because of docker driver
I0724 22:03:51.115154 119525 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0724 22:03:51.124683 119525 kubeadm.go:147] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:

stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0724 22:03:51.124731 119525 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0724 22:04:40.738066 119525 ssh_runner.go:188] Completed: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": (49.61330714s)
I0724 22:04:40.738105 119525 cni.go:74] Creating CNI manager for "calico"
I0724 22:04:40.746983 119525 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.18.3/kubectl ...
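The long --ignore-preflight-errors value in the kubeadm init invocation above is a comma-joined list of checks to skip. A small sketch of assembling it, with the list taken directly from the flags in the log:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Preflight checks skipped by minikube, as seen in the kubeadm init line above.
	ignore := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"DirAvailable--var-lib-minikube",
		"DirAvailable--var-lib-minikube-etcd",
		"FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml",
		"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
		"FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml",
		"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
		"Port-10250", "Swap", "SystemVerification",
		"FileContent--proc-sys-net-bridge-bridge-nf-call-iptables",
	}
	cmd := fmt.Sprintf(
		"sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=%s",
		strings.Join(ignore, ","))
	fmt.Println(cmd)
}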
I0724 22:04:40.747000 119525 ssh_runner.go:215] scp memory --> /var/tmp/minikube/cni.yaml (22463 bytes)
I0724 22:04:40.772444 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0724 22:04:42.155675 119525 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.383186196s)
I0724 22:04:42.155725 119525 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0724 22:04:42.155827 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:42.155832 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl label nodes minikube.k8s.io/version=v1.12.1 minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf minikube.k8s.io/name=calico-20200724220226-14997 minikube.k8s.io/updated_at=2020_07_24T22_04_42_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:42.165453 119525 ops.go:35] apiserver oom_adj: -16
I0724 22:04:42.536847 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:43.338671 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:43.838576 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:44.338519 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:44.838580 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:45.338566 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:45.838484 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:46.338740 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:46.838818 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:47.338537 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:48.899394 119525 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig: (1.56080276s)
I0724 22:04:49.338601 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:57.686449 119525 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig: (8.347812697s)
I0724 22:04:57.838677 119525 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0724 22:04:58.168776 119525 kubeadm.go:866] duration metric: took 16.013020805s to wait for elevateKubeSystemPrivileges.
I0724 22:04:58.168819 119525 kubeadm.go:329] StartCluster complete in 1m7.132623194s
I0724 22:04:58.168841 119525 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:04:58.168945 119525 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
I0724 22:04:58.171684 119525 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0724 22:04:58.171922 119525 start.go:195] Will wait wait-timeout for node ...
I0724 22:04:58.172093 119525 addons.go:353] enableAddons start: toEnable=map[], additional=[]
I0724 22:04:58.264970 119525 addons.go:53] Setting storage-provisioner=true in profile "calico-20200724220226-14997"
I0724 22:04:58.172157 119525 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl scale deployment --replicas=1 coredns -n=kube-system
I0724 22:04:58.265066 119525 addons.go:129] Setting addon storage-provisioner=true in "calico-20200724220226-14997"
W0724 22:04:58.265364 119525 addons.go:138] addon storage-provisioner should already be in state true
I0724 22:04:58.265387 119525 host.go:65] Checking if "calico-20200724220226-14997" exists ...
I0724 22:04:58.265251 119525 addons.go:53] Setting default-storageclass=true in profile "calico-20200724220226-14997"
I0724 22:04:58.265462 119525 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "calico-20200724220226-14997"
I0724 22:04:58.265880 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Status}}
I0724 22:04:58.266672 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Status}}
I0724 22:04:58.269662 119525 api_server.go:48] waiting for apiserver process to appear ...
I0724 22:04:58.269967 119525 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:04:58.341690 119525 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0724 22:04:58.341714 119525 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes)
I0724 22:04:58.341802 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:04:58.371668 119525 addons.go:129] Setting addon default-storageclass=true in "calico-20200724220226-14997"
W0724 22:04:58.371707 119525 addons.go:138] addon default-storageclass should already be in state true
I0724 22:04:58.371721 119525 host.go:65] Checking if "calico-20200724220226-14997" exists ...
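The repeated `kubectl get sa default` runs above are a poll loop: keep trying until the default service account exists or a deadline passes. A sketch of that pattern (not minikube's actual retry.go):

package main

import (
	"log"
	"os/exec"
	"time"
)

// waitForDefaultSA polls `kubectl get sa default` until it succeeds or the
// deadline passes, matching the repeated "get sa default" entries in the log.
func waitForDefaultSA(kubectl string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := exec.Command(kubectl, "get", "sa", "default",
			"--kubeconfig=/var/lib/minikube/kubeconfig").Run()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		time.Sleep(interval)
	}
}

func main() {
	if err := waitForDefaultSA("/var/lib/minikube/binaries/v1.18.3/kubectl",
		500*time.Millisecond, 2*time.Minute); err != nil {
		log.Fatal(err)
	}
}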
I0724 22:04:58.372250 119525 cli_runner.go:109] Run: docker container inspect calico-20200724220226-14997 --format={{.State.Status}}
I0724 22:04:58.418258 119525 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32856 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa Username:docker}
I0724 22:04:58.465453 119525 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml
I0724 22:04:58.465491 119525 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0724 22:04:58.465570 119525 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" calico-20200724220226-14997
I0724 22:04:58.558485 119525 api_server.go:68] duration metric: took 386.521138ms to wait for apiserver process to appear ...
I0724 22:04:58.558517 119525 api_server.go:84] waiting for apiserver healthz status ...
I0724 22:04:58.558530 119525 api_server.go:221] Checking apiserver healthz at https://172.17.0.7:8443/healthz ...
I0724 22:04:58.558784 119525 start.go:549] successfully scaled coredns replicas to 1
I0724 22:04:58.572575 119525 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32856 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/calico-20200724220226-14997/id_rsa Username:docker}
I0724 22:04:58.606253 119525 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0724 22:04:58.685876 119525 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0724 22:04:58.705253 119525 api_server.go:241] https://172.17.0.7:8443/healthz returned 200: ok
I0724 22:04:58.706320 119525 api_server.go:137] control plane version: v1.18.3
I0724 22:04:58.706346 119525 api_server.go:127] duration metric: took 147.820664ms to wait for apiserver health ...
I0724 22:04:58.706357 119525 system_pods.go:43] waiting for kube-system pods to appear ...
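The healthz wait above is a plain HTTPS GET that treats a 200 response as healthy. A sketch of that probe; certificate verification is skipped here for brevity, whereas minikube checks against the cluster CA:

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Sketch only: skip TLS verification instead of loading the cluster CA.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://172.17.0.7:8443/healthz")
	if err != nil {
		fmt.Println("apiserver not reachable:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// Mirrors the "https://172.17.0.7:8443/healthz returned 200: ok" log line.
	fmt.Printf("https://172.17.0.7:8443/healthz returned %d: %s\n", resp.StatusCode, body)
}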
I0724 22:04:58.853291 119525 system_pods.go:59] 9 kube-system pods found
I0724 22:04:58.853331 119525 system_pods.go:61] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:04:58.853339 119525 system_pods.go:61] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending
I0724 22:04:58.853344 119525 system_pods.go:61] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending
I0724 22:04:58.853351 119525 system_pods.go:61] "coredns-66bff467f8-w4wcl" [7e6f65aa-d791-4a6a-bd2a-474085a4dfd8] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:58.853357 119525 system_pods.go:61] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:04:58.853363 119525 system_pods.go:61] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:04:58.853368 119525 system_pods.go:61] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:04:58.853373 119525 system_pods.go:61] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending
I0724 22:04:58.853377 119525 system_pods.go:61] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:04:58.853383 119525 system_pods.go:74] duration metric: took 147.018808ms to wait for pod list to return data ...
I0724 22:04:58.853390 119525 default_sa.go:33] waiting for default service account to be created ...
I0724 22:04:59.062485 119525 default_sa.go:44] found service account: "default"
I0724 22:04:59.062515 119525 default_sa.go:54] duration metric: took 209.11782ms for default service account to be created ...
I0724 22:04:59.062526 119525 system_pods.go:116] waiting for k8s-apps to be running ...
I0724 22:04:59.240687 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:04:59.240749 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:04:59.240762 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending
I0724 22:04:59.240773 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:59.240786 119525 system_pods.go:89] "coredns-66bff467f8-w4wcl" [7e6f65aa-d791-4a6a-bd2a-474085a4dfd8] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:59.240795 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:04:59.240812 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:04:59.240818 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:04:59.240823 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending
I0724 22:04:59.240828 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:04:59.240840 119525 retry.go:30] will retry after 263.082536ms: missing components: kube-dns, kube-proxy
I0724 22:04:59.518060 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:04:59.518104 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:04:59.518115 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending
I0724 22:04:59.518134 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:59.518145 119525 system_pods.go:89] "coredns-66bff467f8-w4wcl" [7e6f65aa-d791-4a6a-bd2a-474085a4dfd8] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:59.518165 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:04:59.518175 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:04:59.518191 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:04:59.518201 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0724 22:04:59.518220 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:04:59.518236 119525 retry.go:30] will retry after 381.329545ms: missing components: kube-dns, kube-proxy
I0724 22:04:59.585020 119525 addons.go:355] enableAddons completed in 1.412923707s
I0724 22:04:59.905443 119525 system_pods.go:86] 10 kube-system pods found
I0724 22:04:59.905540 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:04:59.905557 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:04:59.905566 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:59.905573 119525 system_pods.go:89] "coredns-66bff467f8-w4wcl" [7e6f65aa-d791-4a6a-bd2a-474085a4dfd8] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:04:59.905584 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:04:59.905592 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:04:59.905598 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:04:59.905605 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0724 22:04:59.905616 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:04:59.905621 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Pending
I0724 22:04:59.905632 119525 retry.go:30] will retry after 422.765636ms: missing components: kube-dns, kube-proxy
I0724 22:05:00.343396 119525 system_pods.go:86] 10 kube-system pods found
I0724 22:05:00.343440 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:05:00.343452 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:05:00.343464 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:05:00.343473 119525 system_pods.go:89] "coredns-66bff467f8-w4wcl" [7e6f65aa-d791-4a6a-bd2a-474085a4dfd8] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:05:00.343483 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:05:00.343494 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:05:00.343516 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:05:00.343522 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0724 22:05:00.343528 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:05:00.343534 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Pending
I0724 22:05:00.343544 119525 retry.go:30] will retry after 473.074753ms: missing components: kube-dns, kube-proxy
I0724 22:05:03.810591 119525 system_pods.go:86] 10 kube-system pods found
I0724 22:05:03.810634 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:05:03.810650 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:05:03.810663 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:05:03.810676 119525 system_pods.go:89] "coredns-66bff467f8-w4wcl" [7e6f65aa-d791-4a6a-bd2a-474085a4dfd8] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:05:03.810693 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:05:03.810704 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:05:03.810720 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:05:03.810750 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending / Ready:ContainersNotReady (containers with unready status:
[kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:03.810765 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:03.810777 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:03.810794 119525 retry.go:30] will retry after 587.352751ms: missing components: kube-dns, kube-proxy I0724 22:05:04.514009 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:04.514043 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:04.514057 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:04.514068 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:04.514074 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:04.514082 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:04.514092 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:04.514100 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:04.514111 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:04.514118 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:04.514131 119525 retry.go:30] will retry after 834.206799ms: missing components: kube-dns, kube-proxy I0724 22:05:05.362896 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:05.362938 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:05.362952 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni 
flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:05.362965 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:05.362974 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:05.362984 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:05.362992 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:05.363003 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:05.363011 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:05.363022 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:05.363072 119525 retry.go:30] will retry after 746.553905ms: missing components: kube-dns, kube-proxy I0724 22:05:06.153360 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:06.153403 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:06.153418 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:06.153429 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:06.153436 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:06.153448 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:06.153458 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:06.153475 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:06.153484 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:06.153489 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 
22:05:06.153499 119525 retry.go:30] will retry after 987.362415ms: missing components: kube-dns I0724 22:05:07.148978 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:07.149018 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:07.149028 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:07.149036 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:07.149042 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:07.149049 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:07.149055 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:07.149060 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:07.149162 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:07.149170 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:07.149183 119525 retry.go:30] will retry after 1.189835008s: missing components: kube-dns I0724 22:05:08.515849 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:08.515895 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:08.515906 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:08.515915 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:08.515922 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:08.515929 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:08.515935 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:08.515940 
119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:08.515949 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:08.515966 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:08.515983 119525 retry.go:30] will retry after 1.677229867s: missing components: kube-dns I0724 22:05:10.201612 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:10.201784 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:10.201811 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:10.201825 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:10.201834 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:10.201847 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:10.201865 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:10.201873 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:10.201882 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:10.201890 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:10.201903 119525 retry.go:30] will retry after 2.346016261s: missing components: kube-dns I0724 22:05:13.070508 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:13.070547 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:13.070560 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:13.070572 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:13.070583 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" 
[230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:13.070603 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:13.070609 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:13.070615 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:13.070636 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:13.070649 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:13.070662 119525 retry.go:30] will retry after 3.36678925s: missing components: kube-dns I0724 22:05:21.760285 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:21.760318 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:21.760356 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [upgrade-ipam install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:21.760364 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:21.760370 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:21.760377 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:21.760384 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:21.760389 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:21.760394 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:21.760399 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:21.760413 119525 retry.go:30] will retry after 3.11822781s: missing components: kube-dns I0724 22:05:24.886399 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:24.886447 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:24.886464 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [install-cni flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 
22:05:24.886487 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:24.886498 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:24.886516 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:24.886527 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:24.886544 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:24.886553 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:24.886562 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:24.886576 119525 retry.go:30] will retry after 4.276119362s: missing components: kube-dns I0724 22:05:30.882103 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:30.882150 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:30.882165 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:30.882180 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:30.882189 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:30.882199 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:30.882208 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:30.882228 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:30.882236 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:30.882244 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:30.882258 119525 retry.go:30] will retry after 5.167232101s: missing components: kube-dns I0724 22:05:39.156216 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:39.156247 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:39.156256 119525 system_pods.go:89] "calico-node-tj86p" 
[773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending: Initialized:ContainersNotInitialized (containers with incomplete status: [flexvol-driver]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:39.156273 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:39.156291 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:39.156305 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:39.156317 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:39.156378 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:39.156388 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:39.156404 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:39.156415 119525 retry.go:30] will retry after 6.994901864s: missing components: kube-dns I0724 22:05:46.167356 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:46.167404 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:46.167430 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:46.168490 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:46.168533 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:46.168547 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:46.168568 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:46.168578 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:46.168594 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:46.168603 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:46.168617 119525 retry.go:30] will retry after 7.91826225s: missing components: kube-dns I0724 22:05:54.093731 119525 system_pods.go:86] 9 kube-system pods found I0724 22:05:54.093765 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with 
unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:05:54.093775 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:05:54.093784 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:54.093790 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:05:54.093797 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:05:54.093804 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:05:54.093816 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:05:54.093821 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:05:54.093826 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:05:54.093836 119525 retry.go:30] will retry after 9.953714808s: missing components: kube-dns I0724 22:06:04.053615 119525 system_pods.go:86] 9 kube-system pods found I0724 22:06:04.053648 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:06:04.053658 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:06:04.053667 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:04.053679 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:06:04.053685 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:06:04.053691 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:06:04.053696 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:06:04.053701 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:06:04.053705 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:06:04.053716 119525 retry.go:30] will retry after 15.120437328s: missing components: kube-dns I0724 22:06:19.835517 119525 system_pods.go:86] 9 kube-system pods found I0724 22:06:19.835550 119525 system_pods.go:89] 
"calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:06:19.835559 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:06:19.835568 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:19.835580 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:06:19.835587 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:06:19.835593 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:06:19.835598 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:06:19.835603 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:06:19.835608 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:06:19.835618 119525 retry.go:30] will retry after 14.90607158s: missing components: kube-dns I0724 22:06:36.821289 119525 system_pods.go:86] 9 kube-system pods found I0724 22:06:36.821329 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:06:36.821348 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:06:36.821363 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:36.821373 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:06:36.821387 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:06:36.821396 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:06:36.821404 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:06:36.821411 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:06:36.821418 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:06:36.821432 119525 retry.go:30] will retry after 18.465989061s: missing components: kube-dns 
I0724 22:06:55.295151 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:06:55.295190 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:06:55.295203 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:06:55.295212 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:06:55.295217 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:06:55.295233 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:06:55.295239 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:06:55.295244 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:06:55.295249 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:06:55.295256 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:06:55.295266 119525 retry.go:30] will retry after 25.219510332s: missing components: kube-dns
I0724 22:07:20.522694 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:07:20.522733 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:07:20.522744 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:07:20.522753 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:07:20.522759 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:07:20.522766 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:07:20.522772 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:07:20.522777 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:07:20.522782 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:07:20.522790 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:07:20.522804 119525 retry.go:30] will retry after 35.078569648s: missing components: kube-dns
I0724 22:07:55.607130 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:07:55.607164 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:07:55.607173 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:07:55.607182 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:07:55.607188 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:07:55.607194 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:07:55.607200 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:07:55.607214 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:07:55.607219 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:07:55.607224 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:07:55.607235 119525 retry.go:30] will retry after 50.027701973s: missing components: kube-dns
I0724 22:08:45.642261 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:08:45.642298 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:08:45.642313 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:08:45.642327 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:08:45.642337 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:08:45.642347 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:08:45.642355 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:08:45.642363 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:08:45.642371 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:08:45.642395 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:08:45.642405 119525 retry.go:30] will retry after 47.463338706s: missing components: kube-dns
I0724 22:09:33.113170 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:09:33.113216 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:09:33.113237 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:09:33.113247 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:09:33.113252 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:09:33.113259 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:09:33.113265 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:09:33.113270 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:09:33.113275 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:09:33.113280 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:09:33.113292 119525 retry.go:30] will retry after 53.912476906s: missing components: kube-dns
I0724 22:10:27.031674 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:10:27.031718 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:10:27.031729 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:10:27.031738 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:10:27.031744 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:10:27.031751 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:10:27.031757 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:10:27.031762 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:10:27.031767 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:10:27.031772 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:10:27.031782 119525 retry.go:30] will retry after 1m7.577191067s: missing components: kube-dns
I0724 22:11:34.641655 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:11:34.641692 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:11:34.641705 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:11:34.641716 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:11:34.641722 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:11:34.641728 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:11:34.641734 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:11:34.641740 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:11:34.641747 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:11:34.641752 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:11:34.641762 119525 retry.go:30] will retry after 51.197479857s: missing components: kube-dns
I0724 22:12:25.845373 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:12:25.845409 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:12:25.845420 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:12:25.845430 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:12:25.845436 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:12:25.845442 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:12:25.845449 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:12:25.845455 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:12:25.845461 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:12:25.845474 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:12:25.845484 119525 retry.go:30] will retry after 1m10.96005039s: missing components: kube-dns
I0724 22:13:36.812445 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:13:36.812497 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:13:36.812515 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:13:36.812534 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:13:36.812541 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:13:36.812555 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:13:36.812562 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:13:36.812573 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:13:36.812578 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:13:36.812583 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:13:36.812593 119525 retry.go:30] will retry after 1m5.901574973s: missing components: kube-dns
I0724 22:14:42.726827 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:14:42.726880 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:14:42.726897 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:14:42.726914 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:14:42.726924 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:14:42.726935 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:14:42.726945 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:14:42.726953 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:14:42.726962 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:14:42.726970 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:14:42.726986 119525 retry.go:30] will retry after 1m0.714609182s: missing components: kube-dns
I0724 22:15:43.447717 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:15:43.447756 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:15:43.447769 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:15:43.447779 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:15:43.447784 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:15:43.447790 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:15:43.447796 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:15:43.447801 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:15:43.447806 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:15:43.447811 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:15:43.447822 119525 retry.go:30] will retry after 45.849092499s: missing components: kube-dns
I0724 22:16:29.302655 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:16:29.302691 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:16:29.302701 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:16:29.302710 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:16:29.302716 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:16:29.302722 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:16:29.302728 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:16:29.302733 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:16:29.302739 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:16:29.302744 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:16:29.302753 119525 retry.go:30] will retry after 49.749848332s: missing components: kube-dns
I0724 22:17:19.058643 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:17:19.058676 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:17:19.058685 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:17:19.058694 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:17:19.058700 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:17:19.058707 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:17:19.058713 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:17:19.058718 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:17:19.058724 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:17:19.058729 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:17:19.058744 119525 retry.go:30] will retry after 1m3.217603186s: missing components: kube-dns
I0724 22:18:22.284027 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:18:22.284086 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:18:22.284103 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:18:22.284129 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:18:22.284138 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:18:22.284149 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:18:22.284169 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:18:22.284186 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:18:22.284203 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:18:22.284211 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:18:22.284233 119525 retry.go:30] will retry after 1m14.257248566s: missing components: kube-dns
I0724 22:19:36.547579 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:19:36.547612 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:19:36.547621 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:19:36.547630 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:19:36.547639 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:19:36.547648 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:19:36.547656 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:19:36.547673 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:19:36.547682 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:19:36.547694 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:19:36.547704 119525 retry.go:30] will retry after 47.383608701s: missing components: kube-dns
I0724 22:20:23.937983 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:20:23.938019 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:20:23.938030 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:20:23.938040 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:20:23.938046 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:20:23.938052 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:20:23.938059 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:20:23.938065 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:20:23.938070 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:20:23.938075 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:20:23.938085 119525 retry.go:30] will retry after 1m2.844257931s: missing components: kube-dns
I0724 22:21:26.789662 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:21:26.789715 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:21:26.789732 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I0724 22:21:26.789747 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:21:26.789757 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running
I0724 22:21:26.789778 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running
I0724 22:21:26.789787 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running
I0724 22:21:26.789796 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running
I0724 22:21:26.789813 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running
I0724 22:21:26.789821 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running
I0724 22:21:26.789833 119525 retry.go:30] will retry after 46.773619539s: missing components: kube-dns
I0724 22:22:13.570383 119525 system_pods.go:86] 9 kube-system pods found
I0724 22:22:13.570421 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I0724 22:22:13.570431 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) /
ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:22:13.570443 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:22:13.570450 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:22:13.570457 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:22:13.570471 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:22:13.570476 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:22:13.570482 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:22:13.570495 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:22:13.570506 119525 retry.go:30] will retry after 1m5.760737621s: missing components: kube-dns I0724 22:23:19.341532 119525 system_pods.go:86] 9 kube-system pods found I0724 22:23:19.341580 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:23:19.341597 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:23:19.341617 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:23:19.341625 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:23:19.341637 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:23:19.341644 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:23:19.341651 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:23:19.341660 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:23:19.341667 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:23:19.341680 119525 retry.go:30] will retry after 54.04568043s: missing components: kube-dns I0724 22:24:13.393750 119525 system_pods.go:86] 9 kube-system pods found I0724 22:24:13.393790 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:24:13.393801 119525 system_pods.go:89] "calico-node-tj86p" 
[773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:24:13.393810 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:24:13.393816 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:24:13.393823 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:24:13.393829 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:24:13.393834 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:24:13.393839 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:24:13.393844 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:24:13.393854 119525 retry.go:30] will retry after 50.197987145s: missing components: kube-dns I0724 22:25:03.598907 119525 system_pods.go:86] 9 kube-system pods found I0724 22:25:03.598958 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:25:03.598976 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:25:03.598990 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:25:03.598998 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:25:03.599011 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:25:03.599019 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:25:03.599027 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:25:03.599036 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:25:03.599044 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:25:03.599071 119525 retry.go:30] will retry after 1m1.23299565s: missing components: kube-dns I0724 22:26:04.838679 119525 system_pods.go:86] 9 kube-system pods found I0724 22:26:04.838718 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady 
(containers with unready status: [calico-kube-controllers]) I0724 22:26:04.838734 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:26:04.838749 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:26:04.838758 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:26:04.838777 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:26:04.838787 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:26:04.838802 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:26:04.838810 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:26:04.838826 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:26:04.838839 119525 retry.go:30] will retry after 1m1.32466719s: missing components: kube-dns I0724 22:27:06.169937 119525 system_pods.go:86] 9 kube-system pods found I0724 22:27:06.169975 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:27:06.169985 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:27:06.170031 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:27:06.170038 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:27:06.170045 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:27:06.170057 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:27:06.170062 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:27:06.170068 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:27:06.170074 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:27:06.170090 119525 retry.go:30] will retry after 53.355228654s: missing components: kube-dns I0724 22:27:59.531669 119525 system_pods.go:86] 9 kube-system pods found I0724 22:27:59.531715 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / 
Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:27:59.531735 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:27:59.531744 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:27:59.531759 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:27:59.531765 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:27:59.531771 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:27:59.531782 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:27:59.531787 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:27:59.531793 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:27:59.531803 119525 retry.go:30] will retry after 57.694566047s: missing components: kube-dns I0724 22:28:57.232435 119525 system_pods.go:86] 9 kube-system pods found I0724 22:28:57.232472 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:28:57.232484 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:28:57.232493 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:28:57.232500 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:28:57.232506 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:28:57.232514 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:28:57.232527 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:28:57.232533 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:28:57.232538 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:28:57.232548 119525 retry.go:30] will retry after 1m0.917571461s: missing components: kube-dns I0724 22:29:58.156665 119525 system_pods.go:86] 9 kube-system pods found I0724 
22:29:58.156702 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:29:58.156713 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:29:58.156724 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:29:58.156730 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:29:58.156736 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:29:58.156750 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:29:58.156755 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:29:58.156760 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:29:58.156765 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:29:58.156776 119525 retry.go:30] will retry after 52.606215015s: missing components: kube-dns I0724 22:30:50.769073 119525 system_pods.go:86] 9 kube-system pods found I0724 22:30:50.769112 119525 system_pods.go:89] "calico-kube-controllers-76d4774d89-bqxzb" [43f6fdcb-72f1-40d6-be13-1e7f0a81985a] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers]) I0724 22:30:50.769122 119525 system_pods.go:89] "calico-node-tj86p" [773acae0-7fcd-4ffc-9bf6-9704582bff3a] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) I0724 22:30:50.769132 119525 system_pods.go:89] "coredns-66bff467f8-b2rh7" [c48f0bc5-33c0-4290-8407-23a5ed925fea] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:30:50.769138 119525 system_pods.go:89] "etcd-calico-20200724220226-14997" [230012a4-158d-434e-87cb-1d3dfe9504e7] Running I0724 22:30:50.769144 119525 system_pods.go:89] "kube-apiserver-calico-20200724220226-14997" [34509372-afbe-4807-b042-8fb9ccc787b3] Running I0724 22:30:50.769159 119525 system_pods.go:89] "kube-controller-manager-calico-20200724220226-14997" [101a8c9b-a755-4d1c-8c31-630320b76d94] Running I0724 22:30:50.769165 119525 system_pods.go:89] "kube-proxy-58p6f" [433b3963-a7c0-4ced-9fc3-d0557f74c3e3] Running I0724 22:30:50.769170 119525 system_pods.go:89] "kube-scheduler-calico-20200724220226-14997" [e784ae6d-896b-449f-90c4-cdb436127b76] Running I0724 22:30:50.769175 119525 system_pods.go:89] "storage-provisioner" [f52ff3e2-4876-4302-9bb4-13b7782ee76f] Running I0724 22:30:50.769270 119525 exit.go:58] WithError(failed to 
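What the loop above is doing, in outline: on every poll minikube lists the kube-system pods and schedules another retry until each expected component (here kube-dns, served by the coredns pod) is Running and Ready, or the overall 25m0s node-wait budget runs out. A minimal client-go sketch of that wait pattern, assuming a reachable kubeconfig; waitForKubeDNS and the fixed 10s poll interval are illustrative inventions, not minikube's actual system_pods.go/retry.go implementation (minikube's intervals are jittered, as the varying "will retry after" durations above show):

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForKubeDNS polls kube-system until a coredns/kube-dns pod reports
// Ready, mirroring the "will retry after ...: missing components: kube-dns"
// loop in the log above.
func waitForKubeDNS(client kubernetes.Interface, timeout time.Duration) error {
	return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		pods, err := client.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, nil // treat API hiccups as transient and keep polling
		}
		for _, pod := range pods.Items {
			if !strings.HasPrefix(pod.Name, "coredns-") && !strings.HasPrefix(pod.Name, "kube-dns-") {
				continue
			}
			for _, cond := range pod.Status.Conditions {
				if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
					return true, nil // DNS is up; stop waiting
				}
			}
		}
		fmt.Println("missing components: kube-dns; will retry")
		return false, nil
	})
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// The failed run above allowed 25m0s for the node; use the same budget.
	if err := waitForKubeDNS(client, 25*time.Minute); err != nil {
		fmt.Println("startup failed:", err)
	}
}

In the run above the condition never became true: calico-node stayed unready, so the CNI never wired up pod networking, coredns could not leave Pending, and every poll ended in another retry until the budget expired.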
I0724 22:30:50.769270 119525 exit.go:58] WithError(failed to start node)=startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from:
goroutine 1 [running]:
runtime/debug.Stack(0x0, 0x0, 0x100000000000000)
	/home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d
k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc00001bb20)
	/home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34
k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc0003b8790, 0x2, 0xb)
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505
github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc0003b80b0, 0xb, 0xb, 0x2cd0820, 0xc0003b80b0)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc0001a45f0)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349
github.com/spf13/cobra.(*Command).Execute(...)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887
k8s.io/minikube/cmd/minikube/cmd.Execute()
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c
main.main()
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f
W0724 22:30:50.769398 119525 out.go:249] failed to start node: startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns
* X failed to start node: startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns
*
* minikube is exiting due to an error. If the above message is not useful, open an issue:
  - https://github.com/kubernetes/minikube/issues/new/choose
** /stderr **
net_test.go:82: failed start: exit status 70
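The traceback records the failure path rather than a crash: runStart hands the error to exit.WithError, which logs it with a stack, prints the user-facing message, and terminates the process with a nonzero status that the test harness then reports as "exit status 70" (70 is EX_SOFTWARE in the BSD sysexits convention, which minikube appears to follow here). A sketch of that log-and-exit shape; the function body and the exSoftware constant are illustrative assumptions, not minikube's exit package:

package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

// exSoftware mirrors EX_SOFTWARE from sysexits(3), matching the
// "exit status 70" seen by net_test.go above (assumption).
const exSoftware = 70

// withError logs the error plus the calling stack to stderr, prints a short
// user-facing line, and exits so the caller sees a distinct status code.
func withError(msg string, err error) {
	fmt.Fprintf(os.Stderr, "%s: %v\ncalled from:\n%s", msg, err, debug.Stack())
	fmt.Fprintf(os.Stderr, "* X %s: %v\n", msg, err)
	os.Exit(exSoftware)
}

func main() {
	withError("failed to start node",
		fmt.Errorf("startup failed: wait 25m0s for node: missing components: kube-dns"))
}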
"overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "calico-20200724220226-14997:/var", "/lib/modules:/lib/modules:ro" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/da8396bc55cdb45aebbb4632dd89a9de95b8a469d46138ba4f9c55a1adb9305b-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/da8396bc55cdb45aebbb4632dd89a9de95b8a469d46138ba4f9c55a1adb9305b/merged", "UpperDir": "/var/lib/docker/overlay2/da8396bc55cdb45aebbb4632dd89a9de95b8a469d46138ba4f9c55a1adb9305b/diff", "WorkDir": "/var/lib/docker/overlay2/da8396bc55cdb45aebbb4632dd89a9de95b8a469d46138ba4f9c55a1adb9305b/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "volume", "Name": "calico-20200724220226-14997", "Source": "/var/lib/docker/volumes/calico-20200724220226-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" }, { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" } ], "Config": { "Hostname": "calico-20200724220226-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { 
"created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "calico-20200724220226-14997", "name.minikube.sigs.k8s.io": "calico-20200724220226-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "6a7100ac847ca7b1db07c1701ce2df039abf4a23f62f11e09c5798b75209267c", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32856" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32855" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32854" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32853" } ] }, "SandboxKey": "/var/run/docker/netns/6a7100ac847c", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "462c39bf8a3e73099542e5ddbac66c6dc7296b78e32d2859f7ccfb8fc445cd9b", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.7", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:07", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "462c39bf8a3e73099542e5ddbac66c6dc7296b78e32d2859f7ccfb8fc445cd9b", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.7", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:07", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p calico-20200724220226-14997 -n calico-20200724220226-14997 helpers_test.go:237: <<< TestNetworkPlugins/group/calico FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/calico]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p calico-20200724220226-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p calico-20200724220226-14997 logs -n 25: (3.272861777s) helpers_test.go:245: TestNetworkPlugins/group/calico logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:03:33 UTC, end at Fri 2020-07-24 22:30:52 UTC. 
helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p calico-20200724220226-14997 -n calico-20200724220226-14997
helpers_test.go:237: <<< TestNetworkPlugins/group/calico FAILED: start of post-mortem logs <<<
helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/calico]: minikube logs <======
helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p calico-20200724220226-14997 logs -n 25
helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p calico-20200724220226-14997 logs -n 25: (3.272861777s)
helpers_test.go:245: TestNetworkPlugins/group/calico logs:
-- stdout --
* ==> Docker <==
* -- Logs begin at Fri 2020-07-24 22:03:33 UTC, end at Fri 2020-07-24 22:30:52 UTC. --
* Jul 24 22:30:31 calico-20200724220226-14997 dockerd[359]: time="2020-07-24T22:30:31.671865801Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
* [All 25 retained journal lines carry the identical "ignoring event ... *events.TaskDelete" message; the remaining timestamps are 22:30:31.683, 22:30:32.762, 22:30:32.788, 22:30:33.894, 22:30:33.914, 22:30:35.103, 22:30:35.104, 22:30:36.667, 22:30:36.897, 22:30:40.284, 22:30:40.461, 22:30:43.142, 22:30:43.164, 22:30:45.339, 22:30:46.447, 22:30:47.174, 22:30:48.091, 22:30:48.102, 22:30:49.175, 22:30:49.188, 22:30:50.206, 22:30:50.220, 22:30:51.261, and 22:30:51.261.]
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* 8ed1d63053061 04a9b816c7535 3 minutes ago Exited calico-node 10 8221aaeb88e4e
* 5147d4b8caece calico/pod2daemon-flexvol@sha256:d125b9f3c24133bdaf90eaf2bee1d506240d39a77bda712eda3991b6b5d443f0 25 minutes ago Exited flexvol-driver 0 8221aaeb88e4e
* 3de429ab6304a 35a7136bc71a7 25 minutes ago Exited install-cni 0 8221aaeb88e4e
* 943f3a12f4481 calico/cni@sha256:84113c174b979e686de32094e552933e35d8fc7e2d532efcb9ace5310b65088c 25 minutes ago Exited upgrade-ipam 0 8221aaeb88e4e
* 95cb5db795daf 4689081edb103 25 minutes ago Running storage-provisioner 0 d3c3f76900643
* 04d354316eb95 3439b7546f29b 25 minutes ago Running kube-proxy 0 1a06e2f536213
* 1ed21896fa960 303ce5db0e90d 26 minutes ago Running etcd 0 e2875e71079f7
* 168c6bf5361d3 76216c34ed0c7 26 minutes ago Running kube-scheduler 0 868cb20f9bac1
* 7830e5e5f724e 7e28efa976bd1 26 minutes ago Running kube-apiserver 0 5d4f65e5c4d63
* 9e361174a2d4f da26705ccb4b5 26 minutes ago Running kube-controller-manager 0 a420183d89194
*
* ==> describe nodes <== * Name: calico-20200724220226-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=calico-20200724220226-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=calico-20200724220226-14997 * minikube.k8s.io/updated_at=2020_07_24T22_04_42_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:04:26 +0000 * Taints: *
Unschedulable: false * Lease: * HolderIdentity: calico-20200724220226-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:30:50 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:26:14 +0000 Fri, 24 Jul 2020 22:04:22 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:26:14 +0000 Fri, 24 Jul 2020 22:04:22 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:26:14 +0000 Fri, 24 Jul 2020 22:04:22 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:26:14 +0000 Fri, 24 Jul 2020 22:04:36 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.7 * Hostname: calico-20200724220226-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: ac9a211de21d484486b8a8aadde97703 * System UUID: 4f1465cc-8efc-438c-bb17-0eb44b930690 * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: docker://19.3.2 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (9 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * kube-system calico-kube-controllers-76d4774d89-bqxzb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system calico-node-tj86p 250m (1%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system coredns-66bff467f8-b2rh7 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 25m * kube-system etcd-calico-20200724220226-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system kube-apiserver-calico-20200724220226-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system kube-controller-manager-calico-20200724220226-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system kube-proxy-58p6f 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system kube-scheduler-calico-20200724220226-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 900m (5%) 0 (0%) * memory 70Mi (0%) 170Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 26m (x7 over 26m) kubelet, calico-20200724220226-14997 Node calico-20200724220226-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 26m (x7 over 26m) kubelet, calico-20200724220226-14997 Node calico-20200724220226-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 26m (x7 over 26m) kubelet, calico-20200724220226-14997 Node calico-20200724220226-14997 status is now: NodeHasSufficientPID * Normal Starting 26m kubelet, calico-20200724220226-14997 Starting kubelet. 
* Warning SystemOOM 26m kubelet, calico-20200724220226-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 26m kubelet, calico-20200724220226-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 26m kubelet, calico-20200724220226-14997 Node calico-20200724220226-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 26m kubelet, calico-20200724220226-14997 Node calico-20200724220226-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 26m kubelet, calico-20200724220226-14997 Node calico-20200724220226-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 26m kubelet, calico-20200724220226-14997 Updated Node Allocatable limit across pods * Warning readOnlySysFS 25m kube-proxy, calico-20200724220226-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kube-proxy, calico-20200724220226-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [1ed21896fa96] <== * 2020-07-24 22:28:03.505624 W | wal: sync duration of 2.599242995s, expected less than 1s * 2020-07-24 22:28:03.505819 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (846.662487ms) to execute * 2020-07-24 22:28:14.253591 W | etcdserver: timed out waiting for read index response (local node might have slow network) * 2020-07-24 22:28:14.253711 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "error:etcdserver: request timed out" took too long (7.000269989s) to execute * 2020-07-24 22:28:14.659379 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000138687s) to execute * WARNING: 2020/07/24 22:28:14 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:28:16.220544 W | wal: sync duration of 12.33337542s, expected less than 1s * 2020-07-24 22:28:16.291748 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/etcd-calico-20200724220226-14997.1624cf7a3ca946f2\" " with result "range_response_count:1 size:829" took too long (4.383725658s) to execute * 2020-07-24 22:28:16.291811 W | etcdserver: read-only range request "key:\"/registry/validatingwebhookconfigurations\" range_end:\"/registry/validatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (8.62135007s) to execute * 2020-07-24 22:28:16.291879 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (7.454169059s) to execute * 2020-07-24 22:28:16.292034 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy\" range_end:\"/registry/podsecuritypolicz\" count_only:true " with result "range_response_count:0 size:5" took too long (8.72916248s) to execute * 2020-07-24 22:28:16.292061 W | etcdserver: read-only range request "key:\"/registry/runtimeclasses\" range_end:\"/registry/runtimeclasset\" count_only:true " with result "range_response_count:0 size:5" took too long (2.55441792s) to execute * 2020-07-24 22:28:16.292377 W | etcdserver: read-only range request "key:\"/registry/validatingwebhookconfigurations\" range_end:\"/registry/validatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (6.775860005s) to execute * 2020-07-24 22:28:16.292480 W | etcdserver: read-only range request "key:\"/registry/secrets\" range_end:\"/registry/secrett\" count_only:true " with result "range_response_count:0 size:7" took too long (3.172562584s) to execute * 2020-07-24 22:28:18.842256 W | wal: sync duration of 2.621557621s, expected less than 1s * 2020-07-24 22:28:18.842594 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:610" took too long (2.548774546s) to execute * 2020-07-24 22:28:20.664746 W | wal: sync duration of 1.210507518s, expected less than 1s * 2020-07-24 22:28:20.685371 W | etcdserver: read-only range request "key:\"/registry/ingress\" range_end:\"/registry/ingrest\" count_only:true " with result "range_response_count:0 size:5" took too long (2.431005333s) to execute * 2020-07-24 22:28:20.685391 W | etcdserver: read-only range request 
"key:\"/registry/clusterrolebindings\" range_end:\"/registry/clusterrolebindingt\" count_only:true " with result "range_response_count:0 size:7" took too long (4.376988142s) to execute * 2020-07-24 22:28:20.685405 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (2.025019921s) to execute * 2020-07-24 22:28:20.685477 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/calico-20200724220226-14997\" " with result "range_response_count:1 size:677" took too long (4.391053422s) to execute * 2020-07-24 22:28:20.685633 W | etcdserver: read-only range request "key:\"/registry/masterleases/172.17.0.7\" " with result "range_response_count:0 size:5" took too long (1.841312709s) to execute * 2020-07-24 22:29:22.083104 I | mvcc: store.index: compact 854 * 2020-07-24 22:29:22.083739 I | mvcc: finished scheduled compaction at 854 (took 301.521µs) * 2020-07-24 22:30:00.914621 W | etcdserver: read-only range request "key:\"/registry/masterleases/\" range_end:\"/registry/masterleases0\" " with result "range_response_count:1 size:129" took too long (129.236277ms) to execute * * ==> kernel <== * 22:30:53 up 58 min, 0 users, load average: 9.95, 8.75, 8.26 * Linux calico-20200724220226-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [7830e5e5f724] <== * Trace[28144091]: [7.001918585s] [7.001829479s] END * I0724 22:28:14.254154 1 trace.go:116] Trace[519482538]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:28:07.253041607 +0000 UTC m=+1426.891166502) (total time: 7.001081645s): * Trace[519482538]: [7.001081645s] [7.001081645s] END * E0724 22:28:14.254185 1 status.go:71] apiserver received an error that is not an metav1.Status: rpctypes.EtcdError{code:0xe, desc:"etcdserver: request timed out"} * I0724 22:28:14.254414 1 trace.go:116] Trace[992706608]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.7 (started: 2020-07-24 22:28:07.253010405 +0000 UTC m=+1426.891135400) (total time: 7.001382566s): * Trace[992706608]: [7.001382566s] [7.001359265s] END * I0724 22:28:16.292604 1 trace.go:116] Trace[191624744]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:28:13.298480859 +0000 UTC m=+1432.936605854) (total time: 2.99409085s): * Trace[191624744]: [2.993608216s] [2.993191087s] Transaction committed * I0724 22:28:16.292717 1 trace.go:116] Trace[1141373432]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/calico-20200724220226-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.7 (started: 2020-07-24 22:28:13.29835965 +0000 UTC m=+1432.936484545) (total time: 2.994325567s): * Trace[1141373432]: [2.994325567s] [2.994246161s] END * I0724 22:28:16.292750 1 trace.go:116] Trace[2069852792]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:28:08.837297973 +0000 UTC m=+1428.475422968) (total time: 7.455401145s): * Trace[2069852792]: [7.455343241s] [7.45533484s] About to write a response * I0724 22:28:18.843646 1 trace.go:116] Trace[1274765363]: "Get" 
url:/api/v1/namespaces/default/services/kubernetes,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:28:16.29344657 +0000 UTC m=+1435.931571465) (total time: 2.550159443s): * Trace[1274765363]: [2.550105339s] [2.550097738s] About to write a response * I0724 22:28:20.686013 1 trace.go:116] Trace[19060847]: "Get" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/calico-20200724220226-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.7 (started: 2020-07-24 22:28:16.294011809 +0000 UTC m=+1435.932136704) (total time: 4.391954486s): * Trace[19060847]: [4.391900782s] [4.39188478s] About to write a response * I0724 22:28:20.686029 1 trace.go:116] Trace[1622978050]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:28:11.907575362 +0000 UTC m=+1431.545700357) (total time: 8.778416434s): * Trace[1622978050]: [4.384927542s] [4.384927542s] initial value restored * Trace[1622978050]: [6.935379605s] [2.550452063s] Transaction prepared * Trace[1622978050]: [8.778388633s] [1.843009028s] Transaction committed * I0724 22:28:20.686130 1 trace.go:116] Trace[557644103]: "Patch" url:/api/v1/namespaces/kube-system/events/etcd-calico-20200724220226-14997.1624cf7a3ca946f2,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.7 (started: 2020-07-24 22:28:11.907481456 +0000 UTC m=+1431.545606351) (total time: 8.778618748s): * Trace[557644103]: [4.385023648s] [4.384990746s] About to apply patch * Trace[557644103]: [8.778570045s] [4.390614892s] Object stored in database * I0724 22:28:20.712664 1 trace.go:116] Trace[1165747961]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:28:18.844034842 +0000 UTC m=+1438.482159837) (total time: 1.868600413s): * Trace[1165747961]: [1.841933253s] [1.841933253s] initial value restored * * ==> kube-controller-manager [9e361174a2d4] <== * E0724 22:05:25.052881 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.052904 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.052926 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: unexpected end of JSON input * E0724 22:05:25.053391 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.053414 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.053426 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. 
Error: unexpected end of JSON input * E0724 22:05:25.053740 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.053765 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.053783 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: unexpected end of JSON input * E0724 22:05:25.054043 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.054057 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.054066 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: unexpected end of JSON input * E0724 22:05:25.054361 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.054376 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.054385 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: unexpected end of JSON input * E0724 22:05:25.054711 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.054737 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.054747 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: unexpected end of JSON input * E0724 22:05:25.055011 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.055020 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.055031 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. 
Error: unexpected end of JSON input * E0724 22:05:25.055278 1 driver-call.go:266] Failed to unmarshal output for command: init, output: "", error: unexpected end of JSON input * W0724 22:05:25.055293 1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: fork/exec /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds: no such file or directory, output: "" * E0724 22:05:25.055305 1 plugins.go:729] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: unexpected end of JSON input * E0724 22:28:14.254864 1 cronjob_controller.go:125] Failed to extract job list: etcdserver: request timed out * * ==> kube-proxy [04d354316eb9] <== * W0724 22:05:04.960574 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:05:04.969641 1 node.go:136] Successfully retrieved node IP: 172.17.0.7 * I0724 22:05:04.969685 1 server_others.go:186] Using iptables Proxier. * I0724 22:05:04.970060 1 server.go:583] Version: v1.18.3 * I0724 22:05:04.970803 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:05:04.971224 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:05:04.971621 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:05:04.971710 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:05:04.972063 1 config.go:315] Starting service config controller * I0724 22:05:04.972089 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:05:04.972125 1 config.go:133] Starting endpoints config controller * I0724 22:05:04.972139 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:05:05.072263 1 shared_informer.go:230] Caches are synced for service config * I0724 22:05:05.072271 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [168c6bf5361d] <== * W0724 22:04:26.159871 1 authentication.go:40] Authentication is disabled * I0724 22:04:26.159883 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:04:26.161517 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file * I0724 22:04:26.161554 1 shared_informer.go:223] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * I0724 22:04:26.162129 1 secure_serving.go:178] Serving securely on 127.0.0.1:10259 * I0724 22:04:26.162208 1 tlsconfig.go:240] Starting DynamicServingCertificateController * E0724 22:04:26.235874 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:04:26.237052 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:26.237052 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: 
configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:04:26.237131 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:04:26.237329 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:04:26.237428 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:26.237503 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:04:26.238523 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:04:26.238535 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:04:27.094175 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:27.291265 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:04:27.326659 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:04:27.389755 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:04:27.476312 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:04:27.522151 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:04:27.533176 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 
22:04:27.581431 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:04:27.727501 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * I0724 22:04:29.661738 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:03:33 UTC, end at Fri 2020-07-24 22:30:54 UTC. -- * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:51.396711 2568 pod_workers.go:191] Error syncing pod c48f0bc5-33c0-4290-8407-23a5ed925fea ("coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)\" failed: rpc error: code = Unknown desc = failed to set up sandbox container \"1f00e2efbcaa8ad8ca290b37152b5707c391d4ad2dc08472732294285045ab4c\" network for pod \"coredns-66bff467f8-b2rh7\": networkPlugin cni failed to set up pod \"coredns-66bff467f8-b2rh7_kube-system\" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:51.414039 2568 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-b2rh7_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "1f00e2efbcaa8ad8ca290b37152b5707c391d4ad2dc08472732294285045ab4c" * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:51.422563 2568 pod_container_deletor.go:77] Container "1f00e2efbcaa8ad8ca290b37152b5707c391d4ad2dc08472732294285045ab4c" not found in pod's containers * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:51.424426 2568 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "1f00e2efbcaa8ad8ca290b37152b5707c391d4ad2dc08472732294285045ab4c" * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:51.426376 2568 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "ca6e5bae53bdbf56887f9bd437ec6799096d7efa39a1042bef57728c9315183e" * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:51.436602 2568 pod_container_deletor.go:77] Container "ca6e5bae53bdbf56887f9bd437ec6799096d7efa39a1042bef57728c9315183e" not found in pod's containers * Jul 24 22:30:51 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:51.438267 2568 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container 
"ca6e5bae53bdbf56887f9bd437ec6799096d7efa39a1042bef57728c9315183e" * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.180706 2568 cni.go:364] Error adding kube-system_calico-kube-controllers-76d4774d89-bqxzb/22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c to network calico/k8s-pod-network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.186274 2568 cni.go:364] Error adding kube-system_coredns-66bff467f8-b2rh7/dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f to network calico/k8s-pod-network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.545964 2568 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to set up sandbox container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" network for pod "calico-kube-controllers-76d4774d89-bqxzb": networkPlugin cni failed to set up pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.546019 2568 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system(43f6fdcb-72f1-40d6-be13-1e7f0a81985a)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" network for pod "calico-kube-controllers-76d4774d89-bqxzb": networkPlugin cni failed to set up pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.546035 2568 kuberuntime_manager.go:727] createPodSandbox for pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system(43f6fdcb-72f1-40d6-be13-1e7f0a81985a)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" network for pod "calico-kube-controllers-76d4774d89-bqxzb": networkPlugin cni failed to set up pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.546093 2568 pod_workers.go:191] Error syncing pod 43f6fdcb-72f1-40d6-be13-1e7f0a81985a ("calico-kube-controllers-76d4774d89-bqxzb_kube-system(43f6fdcb-72f1-40d6-be13-1e7f0a81985a)"), skipping: failed to "CreatePodSandbox" for "calico-kube-controllers-76d4774d89-bqxzb_kube-system(43f6fdcb-72f1-40d6-be13-1e7f0a81985a)" with CreatePodSandboxError: "CreatePodSandbox for pod \"calico-kube-controllers-76d4774d89-bqxzb_kube-system(43f6fdcb-72f1-40d6-be13-1e7f0a81985a)\" failed: rpc error: code = Unknown desc = failed to set up sandbox container \"22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c\" network for pod 
\"calico-kube-controllers-76d4774d89-bqxzb\": networkPlugin cni failed to set up pod \"calico-kube-controllers-76d4774d89-bqxzb_kube-system\" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:52.546937 2568 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.561010 2568 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to set up sandbox container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" network for pod "coredns-66bff467f8-b2rh7": networkPlugin cni failed to set up pod "coredns-66bff467f8-b2rh7_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.561048 2568 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" network for pod "coredns-66bff467f8-b2rh7": networkPlugin cni failed to set up pod "coredns-66bff467f8-b2rh7_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.561063 2568 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" network for pod "coredns-66bff467f8-b2rh7": networkPlugin cni failed to set up pod "coredns-66bff467f8-b2rh7_kube-system" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: E0724 22:30:52.561099 2568 pod_workers.go:191] Error syncing pod c48f0bc5-33c0-4290-8407-23a5ed925fea ("coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-b2rh7_kube-system(c48f0bc5-33c0-4290-8407-23a5ed925fea)\" failed: rpc error: code = Unknown desc = failed to set up sandbox container \"dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f\" network for pod \"coredns-66bff467f8-b2rh7\": networkPlugin cni failed to set up pod \"coredns-66bff467f8-b2rh7_kube-system\" network: stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" * Jul 24 22:30:52 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:52.561895 2568 
docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-b2rh7_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" * Jul 24 22:30:53 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:53.582230 2568 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-b2rh7_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" * Jul 24 22:30:53 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:53.591185 2568 pod_container_deletor.go:77] Container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" not found in pod's containers * Jul 24 22:30:53 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:53.592958 2568 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "dcd750e785b28af0e20f54f3c43593945b3481fc423186aaa1ebc5e3cac0848f" * Jul 24 22:30:53 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:53.595071 2568 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "calico-kube-controllers-76d4774d89-bqxzb_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" * Jul 24 22:30:53 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:53.603239 2568 pod_container_deletor.go:77] Container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" not found in pod's containers * Jul 24 22:30:53 calico-20200724220226-14997 kubelet[2568]: W0724 22:30:53.604857 2568 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "22189240e3e90f2ae2ed7d36d150bab43f8bc04270417e85a806657243ba270c" * * ==> storage-provisioner [95cb5db795da] <== -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p calico-20200724220226-14997 -n calico-20200724220226-14997 helpers_test.go:254: (dbg) Run: kubectl --context calico-20200724220226-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: calico-kube-controllers-76d4774d89-bqxzb coredns-66bff467f8-b2rh7 helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/calico]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context calico-20200724220226-14997 describe pod calico-kube-controllers-76d4774d89-bqxzb coredns-66bff467f8-b2rh7 helpers_test.go:265: (dbg) Non-zero exit: kubectl --context calico-20200724220226-14997 describe pod calico-kube-controllers-76d4774d89-bqxzb coredns-66bff467f8-b2rh7: exit status 1 (77.732957ms) ** stderr ** Error from server (NotFound): pods "calico-kube-controllers-76d4774d89-bqxzb" not found Error from server (NotFound): pods "coredns-66bff467f8-b2rh7" not found ** /stderr ** helpers_test.go:267: kubectl --context calico-20200724220226-14997 describe pod calico-kube-controllers-76d4774d89-bqxzb coredns-66bff467f8-b2rh7: exit status 1 helpers_test.go:170: Cleaning up "calico-20200724220226-14997" profile ... 
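Every kubelet failure above reduces to the same root cause: /var/lib/calico/nodename was never written, which means the calico/node container did not come up on this node. A minimal triage sketch while such a cluster is still running, assuming the stock Calico manifest (daemonset name calico-node) and the profile's kubectl context:

    kubectl --context calico-20200724220226-14997 -n kube-system get daemonset calico-node
    kubectl --context calico-20200724220226-14997 -n kube-system logs daemonset/calico-node --tail=20
    docker exec calico-20200724220226-14997 ls /var/lib/calico/

If the daemonset reports zero ready pods, its logs (image pulls, RBAC, or the etcd slowness visible earlier in this dump) usually explain why the nodename file is missing.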
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p calico-20200724220226-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p calico-20200724220226-14997: (7.372656963s) === CONT TestNetworkPlugins/group/custom-weave/Start net_test.go:80: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p custom-weave-20200724220248-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=testdata/weavenet.yaml --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 70 (28m31.430957405s) -- stdout -- * [custom-weave-20200724220248-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome * Using the docker driver based on user configuration * Starting control plane node custom-weave-20200724220248-14997 in cluster custom-weave-20200724220248-14997 * Pulling base image ... * Creating docker container (CPUs=2, Memory=1800MB) ... * Preparing Kubernetes v1.18.3 on Docker 19.03.2 ... * Configuring testdata/weavenet.yaml (Container Networking Interface) ... * Verifying Kubernetes components... * Enabled addons: default-storageclass, storage-provisioner -- /stdout -- ** stderr ** I0724 22:02:48.381014 126494 out.go:188] Setting JSON to false I0724 22:02:48.384160 126494 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":1807,"bootTime":1595626361,"procs":955,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 22:02:48.385318 126494 start.go:111] virtualization: kvm host I0724 22:02:50.346659 126494 notify.go:125] Checking for updates... I0724 22:02:50.610963 126494 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:02:50.674891 126494 docker.go:87] docker version: linux-19.03.8 I0724 22:02:52.531292 126494 start.go:217] selected driver: docker I0724 22:02:52.531309 126494 start.go:623] validating driver "docker" against I0724 22:02:52.531345 126494 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 22:02:52.531497 126494 cli_runner.go:109] Run: docker system info --format "{{json .}}" ! Requested memory allocation (1800MB) is less than the recommended minimum 2000MB. Kubernetes may crash unexpectedly. I0724 22:02:52.594197 126494 start_flags.go:223] no existing cluster config was found, will generate one from the flags ! Requested memory allocation (1800MB) is less than the recommended minimum 2000MB. Kubernetes may crash unexpectedly. 
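The repeated memory warning is advisory only: this suite pins --memory=1800 in the test invocation. On an interactive run the same start avoids the warning by meeting the recommended floor, e.g. (remaining flags as in the invocation above, elided here):

    ./minikube-linux-amd64 start -p custom-weave-20200724220248-14997 --memory=2000 --vm-driver=docker ...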
I0724 22:02:52.594957 126494 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 22:02:52.594994 126494 cni.go:74] Creating CNI manager for "testdata/weavenet.yaml" I0724 22:02:52.595019 126494 start_flags.go:340] Found "testdata/weavenet.yaml" CNI - setting NetworkPlugin=cni I0724 22:02:52.595047 126494 start_flags.go:345] config: {Name:custom-weave-20200724220248-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:custom-weave-20200724220248-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:testdata/weavenet.yaml NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:02:52.798877 126494 cache.go:117] Beginning downloading kic base image for docker with docker I0724 22:02:52.843474 126494 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:02:52.843521 126494 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:02:52.843536 126494 cache.go:51] Caching tarball of preloaded images I0724 22:02:52.843559 126494 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download I0724 22:02:52.843577 126494 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on docker I0724 22:02:52.843639 126494 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 22:02:52.843664 126494 image.go:140] Writing local/kicbase:-snapshot to local daemon I0724 22:02:52.843917 126494 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/config.json ... 
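Note how the preload path short-circuits the heavy work: because preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 is already present, only the kic base image still has to be fetched. A quick sanity check of the cache this run is using (path copied from the log; MINIKUBE_HOME is overridden to the test home here):

    ls -lh /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/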
I0724 22:02:52.844178 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/config.json: {Name:mk8d0c31842fd99ad915a08dcde439c4185d0261 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:02:53.256097 126494 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found I0724 22:02:53.256156 126494 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:02:53.256162 126494 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:02:57.680000 126494 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 ! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image I0724 22:02:57.680070 126494 cache.go:178] Successfully downloaded all kic artifacts I0724 22:02:57.680101 126494 start.go:241] acquiring machines lock for custom-weave-20200724220248-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 22:03:54.725727 126494 start.go:245] acquired machines lock for "custom-weave-20200724220248-14997" in 57.045601866s I0724 22:03:54.725761 126494 start.go:85] Provisioning new machine with config: &{Name:custom-weave-20200724220248-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:custom-weave-20200724220248-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:testdata/weavenet.yaml NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true} I0724 22:03:54.725828 126494 start.go:122] createHost starting for "" (driver="docker") I0724 22:03:54.736977 126494 start.go:158] libmachine.API.Create for "custom-weave-20200724220248-14997" (driver="docker") I0724 22:03:54.737043 126494 client.go:161] LocalClient.Create starting I0724 22:03:54.737114 126494 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 22:03:54.737168 
126494 main.go:115] libmachine: Decoding PEM data... I0724 22:03:54.737202 126494 main.go:115] libmachine: Parsing certificate... I0724 22:03:54.737377 126494 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 22:03:54.737419 126494 main.go:115] libmachine: Decoding PEM data... I0724 22:03:54.737451 126494 main.go:115] libmachine: Parsing certificate... I0724 22:03:54.738153 126494 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 22:03:54.799151 126494 cli_runner.go:109] Run: docker volume create custom-weave-20200724220248-14997 --label name.minikube.sigs.k8s.io=custom-weave-20200724220248-14997 --label created_by.minikube.sigs.k8s.io=true I0724 22:03:54.873758 126494 oci.go:101] Successfully created a docker volume custom-weave-20200724220248-14997 I0724 22:03:54.873838 126494 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v custom-weave-20200724220248-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib I0724 22:04:09.927310 126494 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/test -v custom-weave-20200724220248-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib: (15.053429007s) I0724 22:04:09.927342 126494 oci.go:105] Successfully prepared a docker volume custom-weave-20200724220248-14997 W0724 22:04:09.927386 126494 oci.go:165] Your kernel does not support swap limit capabilities or the cgroup is not mounted. I0724 22:04:09.927662 126494 cli_runner.go:109] Run: docker info --format "'{{json .SecurityOptions}}'" I0724 22:04:09.927402 126494 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:04:09.927746 126494 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:04:09.927764 126494 kic.go:133] Starting extracting preloaded images to volume ... 
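The /usr/bin/test invocation above is minikube's volume probe: mounting the new named volume at /var triggers Docker's copy-up of the image's existing /var contents into the empty volume, so test -d /var/lib exits 0 only if the volume mounted and populated correctly. The pattern reproduces standalone (volume name illustrative, image reference taken from the log):

    docker volume create probe-vol
    docker run --rm --entrypoint /usr/bin/test -v probe-vol:/var kicbase/stable:v0.0.10 -d /var/lib && echo volume OK
    docker volume rm probe-vol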
I0724 22:04:09.927833 126494 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v custom-weave-20200724220248-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir I0724 22:04:09.995370 126494 cli_runner.go:109] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname custom-weave-20200724220248-14997 --name custom-weave-20200724220248-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=custom-weave-20200724220248-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=custom-weave-20200724220248-14997 --volume custom-weave-20200724220248-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=1800mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 I0724 22:04:11.074471 126494 cli_runner.go:151] Completed: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname custom-weave-20200724220248-14997 --name custom-weave-20200724220248-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=custom-weave-20200724220248-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=custom-weave-20200724220248-14997 --volume custom-weave-20200724220248-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=1800mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438: (1.079005698s) I0724 22:04:11.074604 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Running}} I0724 22:04:11.129259 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Status}} I0724 22:04:11.202501 126494 cli_runner.go:109] Run: docker exec custom-weave-20200724220248-14997 stat /var/lib/dpkg/alternatives/iptables I0724 22:04:11.871517 126494 oci.go:222] the created container "custom-weave-20200724220248-14997" has a running status. I0724 22:04:11.871557 126494 kic.go:157] Creating ssh key for kic: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa... 
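In the docker run above, each --publish leaves the host port empty (127.0.0.1::22 and friends), which asks Docker to bind a free ephemeral port on localhost. The log later recovers the SSH mapping via docker container inspect; docker port gives the same answer more directly:

    docker port custom-weave-20200724220248-14997 22
    # prints the bound endpoint, e.g. 127.0.0.1:32864 as seen further down in this log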
I0724 22:04:12.214781 126494 kic_runner.go:179] docker (temp): /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) I0724 22:04:19.760835 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Status}} I0724 22:04:19.817502 126494 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys I0724 22:04:19.817523 126494 kic_runner.go:114] Args: [docker exec --privileged custom-weave-20200724220248-14997 chown docker:docker /home/docker/.ssh/authorized_keys] I0724 22:04:20.072563 126494 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v custom-weave-20200724220248-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir: (10.144689093s) I0724 22:04:20.072592 126494 kic.go:138] duration metric: took 10.144827 seconds to extract preloaded images to volume I0724 22:04:20.072689 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Status}} I0724 22:04:20.127259 126494 machine.go:88] provisioning docker machine ... I0724 22:04:20.127305 126494 ubuntu.go:166] provisioning hostname "custom-weave-20200724220248-14997" I0724 22:04:20.127374 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:20.189051 126494 main.go:115] libmachine: Using SSH client type: native I0724 22:04:20.189343 126494 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32864 } I0724 22:04:20.189384 126494 main.go:115] libmachine: About to run SSH command: sudo hostname custom-weave-20200724220248-14997 && echo "custom-weave-20200724220248-14997" | sudo tee /etc/hostname I0724 22:04:20.190035 126494 main.go:115] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:48690->127.0.0.1:32864: read: connection reset by peer I0724 22:04:23.336446 126494 main.go:115] libmachine: SSH cmd err, output: : custom-weave-20200724220248-14997 I0724 22:04:23.336548 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:23.399575 126494 main.go:115] libmachine: Using SSH client type: native I0724 22:04:23.399765 126494 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32864 } I0724 22:04:23.399807 126494 main.go:115] libmachine: About to run SSH command: if ! 
grep -xq '.*\scustom-weave-20200724220248-14997' /etc/hosts; then if grep -xq '127.0.1.1\s.*' /etc/hosts; then sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 custom-weave-20200724220248-14997/g' /etc/hosts; else echo '127.0.1.1 custom-weave-20200724220248-14997' | sudo tee -a /etc/hosts; fi fi I0724 22:04:23.524929 126494 main.go:115] libmachine: SSH cmd err, output: : I0724 22:04:23.524958 126494 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube} I0724 22:04:23.524975 126494 ubuntu.go:174] setting up certificates I0724 22:04:23.524983 126494 provision.go:82] configureAuth start I0724 22:04:23.525041 126494 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" custom-weave-20200724220248-14997 I0724 22:04:23.581118 126494 provision.go:131] copyHostCerts I0724 22:04:23.581188 126494 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ... I0724 22:04:23.581266 126494 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes) I0724 22:04:23.581370 126494 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ... I0724 22:04:23.581410 126494 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes) I0724 22:04:23.581474 126494 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ... 
I0724 22:04:23.581503 126494 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes) I0724 22:04:23.581548 126494 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.custom-weave-20200724220248-14997 san=[172.17.0.8 localhost 127.0.0.1] I0724 22:04:23.921509 126494 provision.go:159] copyRemoteCerts I0724 22:04:23.921576 126494 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker I0724 22:04:23.921629 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:23.982238 126494 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32864 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa Username:docker} I0724 22:04:24.088707 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes) I0724 22:04:24.109652 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1155 bytes) I0724 22:04:24.129977 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes) I0724 22:04:24.151528 126494 provision.go:85] duration metric: configureAuth took 626.522852ms I0724 22:04:24.151558 126494 ubuntu.go:190] setting minikube options for container-runtime I0724 22:04:24.151795 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:24.207429 126494 main.go:115] libmachine: Using SSH client type: native I0724 22:04:24.207617 126494 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32864 } I0724 22:04:24.207633 126494 main.go:115] libmachine: About to run SSH command: df --output=fstype / | tail -n 1 I0724 22:04:24.328895 126494 main.go:115] libmachine: SSH cmd err, output: : overlay I0724 22:04:24.328918 126494 ubuntu.go:71] root file system type: overlay I0724 22:04:24.329065 126494 provision.go:290] Updating docker unit: /lib/systemd/system/docker.service ... 
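configureAuth generated a fresh server certificate with SANs [172.17.0.8 localhost 127.0.0.1] and copied it, with its key, to /etc/docker inside the node. A minimal openssl sketch to confirm the copied pair actually match and carry the expected SANs (paths assume the .minikube layout shown above; an RSA key is assumed, consistent with the 1679-byte PEM logged):

    P=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines
    openssl x509 -noout -modulus -in $P/server.pem | openssl md5
    openssl rsa -noout -modulus -in $P/server-key.pem | openssl md5   # the two digests must match
    openssl x509 -noout -text -in $P/server.pem | grep -A1 'Subject Alternative Name'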
I0724 22:04:24.329129 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:24.391914 126494 main.go:115] libmachine: Using SSH client type: native I0724 22:04:24.392098 126494 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32864 } I0724 22:04:24.392214 126494 main.go:115] libmachine: About to run SSH command: sudo mkdir -p /lib/systemd/system && printf %s "[Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com BindsTo=containerd.service After=network-online.target firewalld.service containerd.service Wants=network-online.target Requires=docker.socket [Service] Type=notify # This file is a systemd drop-in unit that inherits from the base dockerd configuration. # The base configuration already specifies an 'ExecStart=...' command. The first directive # here is to clear out that command inherited from the base configuration. Without this, # the command from the base configuration and the command specified here are treated as # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd # will catch this invalid input and refuse to start the service with an error like: # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other # container runtimes. If left unlimited, it may result in OOM issues with MySQL. ExecStart= ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 ExecReload=/bin/kill -s HUP $MAINPID # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity # Uncomment TasksMax if your systemd version supports it. # Only systemd 226 and above support this version. TasksMax=infinity TimeoutStartSec=0 # set delegate yes so that systemd does not reset the cgroups of docker containers Delegate=yes # kill only the docker process, not all processes in the cgroup KillMode=process [Install] WantedBy=multi-user.target " | sudo tee /lib/systemd/system/docker.service.new I0724 22:04:24.633845 126494 main.go:115] libmachine: SSH cmd err, output: : [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com BindsTo=containerd.service After=network-online.target firewalld.service containerd.service Wants=network-online.target Requires=docker.socket [Service] Type=notify # This file is a systemd drop-in unit that inherits from the base dockerd configuration. # The base configuration already specifies an 'ExecStart=...' command. The first directive # here is to clear out that command inherited from the base configuration. Without this, # the command from the base configuration and the command specified here are treated as # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd # will catch this invalid input and refuse to start the service with an error like: # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. 
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other # container runtimes. If left unlimited, it may result in OOM issues with MySQL. ExecStart= ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 ExecReload=/bin/kill -s HUP # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity # Uncomment TasksMax if your systemd version supports it. # Only systemd 226 and above support this version. TasksMax=infinity TimeoutStartSec=0 # set delegate yes so that systemd does not reset the cgroups of docker containers Delegate=yes # kill only the docker process, not all processes in the cgroup KillMode=process [Install] WantedBy=multi-user.target I0724 22:04:24.633957 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:24.698571 126494 main.go:115] libmachine: Using SSH client type: native I0724 22:04:24.698769 126494 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32864 } I0724 22:04:24.698796 126494 main.go:115] libmachine: About to run SSH command: sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; } I0724 22:04:26.034810 126494 main.go:115] libmachine: SSH cmd err, output: : --- /lib/systemd/system/docker.service 2019-08-29 04:42:14.000000000 +0000 +++ /lib/systemd/system/docker.service.new 2020-07-24 22:04:24.627471722 +0000 @@ -8,24 +8,22 @@ [Service] Type=notify -# the default is not to use systemd for cgroups because the delegate issues still -# exists and systemd currently does not support the cgroup feature set required -# for containers run by docker -ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock -ExecReload=/bin/kill -s HUP $MAINPID -TimeoutSec=0 -RestartSec=2 -Restart=always - -# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. -# Both the old, and new location are accepted by systemd 229 and up, so using the old location -# to make them work for either version of systemd. -StartLimitBurst=3 - -# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. -# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make -# this option work for either version of systemd. -StartLimitInterval=60s + + + +# This file is a systemd drop-in unit that inherits from the base dockerd configuration. +# The base configuration already specifies an 'ExecStart=...' command. The first directive +# here is to clear out that command inherited from the base configuration. 
Without this, +# the command from the base configuration and the command specified here are treated as +# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd +# will catch this invalid input and refuse to start the service with an error like: +# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + +# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other +# container runtimes. If left unlimited, it may result in OOM issues with MySQL. +ExecStart= +ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 +ExecReload=/bin/kill -s HUP # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. @@ -33,9 +31,10 @@ LimitNPROC=infinity LimitCORE=infinity -# Comment TasksMax if your systemd version does not support it. -# Only systemd 226 and above support this option. +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. TasksMax=infinity +TimeoutStartSec=0 # set delegate yes so that systemd does not reset the cgroups of docker containers Delegate=yes I0724 22:04:26.034838 126494 machine.go:91] provisioned docker machine in 5.907552231s I0724 22:04:26.034848 126494 client.go:164] LocalClient.Create took 31.297790376s I0724 22:04:26.034866 126494 start.go:163] duration metric: libmachine.API.Create for "custom-weave-20200724220248-14997" took 31.297892583s I0724 22:04:26.034877 126494 start.go:204] post-start starting for "custom-weave-20200724220248-14997" (driver="docker") I0724 22:04:26.034885 126494 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] I0724 22:04:26.034993 126494 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs I0724 22:04:26.035072 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:26.089566 126494 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32864 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa Username:docker} I0724 22:04:26.177669 126494 ssh_runner.go:148] Run: cat /etc/os-release I0724 22:04:26.181469 126494 main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found I0724 22:04:26.181493 126494 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found I0724 22:04:26.181507 126494 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found I0724 22:04:26.181515 126494 info.go:98] Remote host: Ubuntu 19.10 I0724 22:04:26.181527 126494 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets 
... I0724 22:04:26.181574 126494 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ... I0724 22:04:26.181708 126494 filesync.go:141] local asset: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts -> hosts in /etc/test/nested/copy/14997 I0724 22:04:26.181770 126494 ssh_runner.go:148] Run: sudo mkdir -p /etc/test/nested/copy/14997 I0724 22:04:26.190542 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts --> /etc/test/nested/copy/14997/hosts (40 bytes) I0724 22:04:26.212434 126494 start.go:207] post-start completed in 177.538541ms I0724 22:04:26.212757 126494 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" custom-weave-20200724220248-14997 I0724 22:04:26.274810 126494 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/config.json ... I0724 22:04:26.274980 126494 start.go:125] duration metric: createHost completed in 31.549143349s I0724 22:04:26.274997 126494 start.go:76] releasing machines lock for "custom-weave-20200724220248-14997", held for 31.549253956s I0724 22:04:26.275069 126494 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" custom-weave-20200724220248-14997 I0724 22:04:26.324259 126494 ssh_runner.go:148] Run: systemctl --version I0724 22:04:26.324319 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:26.324368 126494 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/ I0724 22:04:26.324434 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:04:26.384027 126494 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32864 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa Username:docker} I0724 22:04:26.385249 126494 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32864 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa Username:docker} I0724 22:04:26.469802 126494 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd I0724 22:04:26.533538 126494 ssh_runner.go:148] Run: sudo systemctl cat docker.service I0724 22:04:26.544889 126494 cruntime.go:192] skipping containerd shutdown because we are bound to it I0724 22:04:26.544943 126494 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service crio I0724 22:04:26.558983 126494 ssh_runner.go:148] Run: sudo systemctl cat docker.service I0724 22:04:26.571778 126494 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:04:26.658883 126494 ssh_runner.go:148] Run: sudo systemctl start docker I0724 22:04:26.673029 126494 ssh_runner.go:148] Run: docker version --format {{.Server.Version}} I0724 22:04:26.794274 126494 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}} I0724 22:04:26.882480 126494 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 
0).Gateway}}" d4a420189740
I0724 22:04:26.936673 126494 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1
I0724 22:04:26.936750 126494 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts
I0724 22:04:26.941024 126494 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:04:26.954598 126494 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker
I0724 22:04:26.954640 126494 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4
I0724 22:04:26.954691 126494 ssh_runner.go:148] Run: docker images --format {{.Repository}}:{{.Tag}}
I0724 22:04:27.009957 126494 docker.go:381] Got preloaded images: -- stdout --
kubernetesui/dashboard:v2.0.1
k8s.gcr.io/kube-proxy:v1.18.3
k8s.gcr.io/kube-controller-manager:v1.18.3
k8s.gcr.io/kube-apiserver:v1.18.3
k8s.gcr.io/kube-scheduler:v1.18.3
kubernetesui/metrics-scraper:v1.0.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/coredns:1.6.7
k8s.gcr.io/etcd:3.4.3-0
gcr.io/k8s-minikube/storage-provisioner:v1.8.1
-- /stdout --
I0724 22:04:27.010070 126494 docker.go:319] Images already preloaded, skipping extraction
I0724 22:04:27.010121 126494 ssh_runner.go:148] Run: docker images --format {{.Repository}}:{{.Tag}}
I0724 22:04:27.083281 126494 docker.go:381] Got preloaded images: -- stdout --
kubernetesui/dashboard:v2.0.1
k8s.gcr.io/kube-proxy:v1.18.3
k8s.gcr.io/kube-scheduler:v1.18.3
k8s.gcr.io/kube-apiserver:v1.18.3
k8s.gcr.io/kube-controller-manager:v1.18.3
kubernetesui/metrics-scraper:v1.0.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/coredns:1.6.7
k8s.gcr.io/etcd:3.4.3-0
gcr.io/k8s-minikube/storage-provisioner:v1.8.1
-- /stdout --
I0724 22:04:27.083304 126494 cache_images.go:69] Images are preloaded, skipping loading
I0724 22:04:27.083405 126494 ssh_runner.go:148] Run: docker info --format {{.CgroupDriver}}
I0724 22:04:27.164625 126494 cni.go:74] Creating CNI manager for "testdata/weavenet.yaml"
I0724 22:04:27.164676 126494 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0724 22:04:27.164703 126494 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.8 APIServerPort:8443 KubernetesVersion:v1.18.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:custom-weave-20200724220248-14997 NodeName:custom-weave-20200724220248-14997 DNSDomain:cluster.local CRISocket: ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.8"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.8 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0724 22:04:27.164870 126494 kubeadm.go:154] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.17.0.8
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: "custom-weave-20200724220248-14997"
  kubeletExtraArgs:
    node-ip: 172.17.0.8
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "172.17.0.8"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
controllerManager:
  extraArgs:
    "leader-elect": "false"
scheduler:
  extraArgs:
    "leader-elect": "false"
kubernetesVersion: v1.18.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 172.17.0.8:10249
I0724 22:04:27.164962 126494 kubeadm.go:790] kubelet
[Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=custom-weave-20200724220248-14997 --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.8
[Install]
config: {KubernetesVersion:v1.18.3 ClusterName:custom-weave-20200724220248-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:testdata/weavenet.yaml NodeIP: NodePort:8443 NodeName:}
I0724 22:04:27.165059 126494 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.18.3
I0724 22:04:27.174349 126494 binaries.go:43] Found k8s binaries, skipping transfer
I0724 22:04:27.174418 126494 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0724 22:04:27.184290 126494 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (378 bytes)
I0724 22:04:27.237110 126494 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes)
I0724 22:04:27.283568 126494 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1781 bytes)
I0724 22:04:27.304298 126494 ssh_runner.go:148] Run: grep 172.17.0.8 control-plane.minikube.internal$ /etc/hosts
I0724 22:04:27.307846 126494 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.8 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:04:27.318442 126494 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:04:27.399951 126494 ssh_runner.go:148] Run: sudo systemctl start kubelet
I0724 22:04:27.417158 126494 certs.go:52] Setting up
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997 for IP: 172.17.0.8 I0724 22:04:27.417267 126494 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key I0724 22:04:27.417319 126494 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key I0724 22:04:27.417401 126494 certs.go:273] generating minikube-user signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/client.key I0724 22:04:27.417445 126494 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/client.crt with IP's: [] I0724 22:04:27.799336 126494 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/client.crt ... I0724 22:04:27.799372 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/client.crt: {Name:mk51dfbe32c62b2b031a460641f6ebc74377f133 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:04:27.799553 126494 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/client.key ... I0724 22:04:27.799578 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/client.key: {Name:mkd5f8986911c31e58b6de4bc94d7677e9ad1a84 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:04:27.799693 126494 certs.go:273] generating minikube signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.key.f7ca08ce I0724 22:04:27.799706 126494 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.crt.f7ca08ce with IP's: [172.17.0.8 10.96.0.1 127.0.0.1 10.0.0.1] I0724 22:04:28.328661 126494 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.crt.f7ca08ce ... I0724 22:04:28.328696 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.crt.f7ca08ce: {Name:mkcc2ecf7fc69961f24102081fa2f1e3c5825cde Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:04:28.328868 126494 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.key.f7ca08ce ... 
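The cert steps above reuse the cached minikubeCA key and mint per-profile certificates; the apiserver certificate carries IP SANs for the node IP (172.17.0.8), the service VIP (10.96.0.1), localhost, and 10.0.0.1, matching the list the log prints. As a rough illustration of what such a step involves, here is a minimal sketch using Go's standard crypto/x509; the names and parameters are illustrative, not minikube's actual certs.go helpers:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// CA keypair: stands in for the cached minikubeCA key the log reuses.
	// Error handling is elided for brevity in this sketch.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Apiserver leaf certificate: the SAN list mirrors the IPs in the log entry above.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(1, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("172.17.0.8"), net.ParseIP("10.96.0.1"),
			net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
		},
	}
	leafDER, _ := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: leafDER})
}

The .f7ca08ce suffix on apiserver.crt/key above appears to key the cert by its spec, so a changed IP list forces a fresh apiserver certificate while the CA stays stable.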
I0724 22:04:28.328884 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.key.f7ca08ce: {Name:mkcbf5f5baf03c8d0b572d720acb474e2409f92b Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:04:28.329009 126494 certs.go:284] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.crt.f7ca08ce -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.crt I0724 22:04:28.329097 126494 certs.go:288] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.key.f7ca08ce -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.key I0724 22:04:28.329168 126494 certs.go:273] generating aggregator signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.key I0724 22:04:28.329180 126494 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.crt with IP's: [] I0724 22:04:28.509158 126494 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.crt ... I0724 22:04:28.509201 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.crt: {Name:mk6ad77e8b093778ec0ea3e3ca83f7a32b0415ec Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:04:28.509426 126494 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.key ... 
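Each "WriteFile acquiring" entry above takes a named lock with the printed parameters {Delay:500ms Timeout:1m0s} before touching a profile file, so concurrent minikube invocations cannot interleave writes to the same paths. A minimal sketch of that acquire/retry/timeout shape, using a plain O_EXCL lock file (illustrative only; minikube uses a lock library rather than this hand-rolled approach):

package main

import (
	"fmt"
	"os"
	"time"
)

// acquire polls for an exclusive lock file, retrying every delay until timeout,
// mirroring the {Delay:500ms Timeout:1m0s} parameters printed by lock.go above.
func acquire(path string, delay, timeout time.Duration) (release func(), err error) {
	deadline := time.Now().Add(timeout)
	for {
		f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
		if err == nil {
			f.Close()
			return func() { os.Remove(path) }, nil
		}
		if time.Now().After(deadline) {
			return nil, fmt.Errorf("timed out acquiring %s: %w", path, err)
		}
		time.Sleep(delay)
	}
}

func main() {
	release, err := acquire("/tmp/demo.lock", 500*time.Millisecond, time.Minute)
	if err != nil {
		panic(err)
	}
	defer release()
	// ... write the protected file while the lock is held ...
	fmt.Println("lock held; writing file")
}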
I0724 22:04:28.509450 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.key: {Name:mk2d4356ac745144bf53d0677d407be094845927 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:04:28.509739 126494 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes) W0724 22:04:28.509822 126494 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes I0724 22:04:28.509849 126494 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes) I0724 22:04:28.509901 126494 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes) I0724 22:04:28.509949 126494 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes) I0724 22:04:28.509996 126494 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes) I0724 22:04:28.511301 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes) I0724 22:04:28.537869 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes) I0724 22:04:28.560703 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes) I0724 22:04:28.586131 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/custom-weave-20200724220248-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes) I0724 22:04:28.610055 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes) I0724 22:04:28.642003 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes) I0724 22:04:28.674554 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes) I0724 22:04:28.700544 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes) I0724 22:04:28.724522 126494 ssh_runner.go:215] scp 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes) I0724 22:04:28.748729 126494 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes) I0724 22:04:28.772797 126494 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes) I0724 22:04:29.039075 126494 ssh_runner.go:148] Run: openssl version I0724 22:04:29.046558 126494 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem" I0724 22:04:29.056393 126494 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem I0724 22:04:29.064428 126494 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem I0724 22:04:29.064498 126494 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem I0724 22:04:29.071096 126494 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0" I0724 22:04:29.079966 126494 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0724 22:04:29.089382 126494 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0724 22:04:29.093133 126494 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem I0724 22:04:29.093201 126494 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0724 22:04:29.098990 126494 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0724 22:04:29.107779 126494 kubeadm.go:327] StartCluster: {Name:custom-weave-20200724220248-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:custom-weave-20200724220248-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:testdata/weavenet.yaml NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.8 Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:04:29.107950 126494 ssh_runner.go:148] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}} I0724 22:04:29.160971 126494 ssh_runner.go:148] 
Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd I0724 22:04:29.170451 126494 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml I0724 22:04:29.178841 126494 kubeadm.go:211] ignoring SystemVerification for kubeadm because of docker driver I0724 22:04:29.178895 126494 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf I0724 22:04:29.188859 126494 kubeadm.go:147] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2 stdout: stderr: ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory I0724 22:04:29.188900 126494 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables" I0724 22:05:10.346485 126494 ssh_runner.go:188] Completed: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": (41.157558837s) I0724 22:05:10.346516 126494 cni.go:74] Creating CNI manager for "testdata/weavenet.yaml" I0724 22:05:10.367246 126494 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.18.3/kubectl ... 
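The init step above shells out to the versioned kubeadm binary with the generated config, skipping the preflight checks the docker driver is known to trip (pre-created directories and manifest files, swap, the bridge-nf sysctl), and times the run for the duration metric. A sketch of the same invocation from Go with os/exec, using the paths and a subset of the ignore list shown in the log entry above:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
	"time"
)

func main() {
	// A subset of the preflight errors ignored in the log entry above.
	ignore := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"Port-10250", "Swap", "SystemVerification",
		"FileContent--proc-sys-net-bridge-bridge-nf-call-iptables",
	}
	cmd := exec.Command("sudo", "env", "PATH=/var/lib/minikube/binaries/v1.18.3:"+os.Getenv("PATH"),
		"kubeadm", "init",
		"--config", "/var/tmp/minikube/kubeadm.yaml",
		"--ignore-preflight-errors="+strings.Join(ignore, ","),
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	start := time.Now()
	err := cmd.Run()
	// The "Completed: ... (41.157558837s)" line comes from timing the command like this.
	fmt.Printf("kubeadm init took %s (err=%v)\n", time.Since(start), err)
}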
I0724 22:05:10.367372 126494 ssh_runner.go:148] Run: stat -c "%s %y" /var/tmp/minikube/cni.yaml I0724 22:05:10.373411 126494 ssh_runner.go:205] existence check for /var/tmp/minikube/cni.yaml: stat -c "%s %y" /var/tmp/minikube/cni.yaml: Process exited with status 1 stdout: stderr: stat: cannot stat '/var/tmp/minikube/cni.yaml': No such file or directory I0724 22:05:10.373446 126494 ssh_runner.go:215] scp testdata/weavenet.yaml --> /var/tmp/minikube/cni.yaml (10948 bytes) I0724 22:05:10.400230 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml I0724 22:05:22.089966 126494 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (11.689674605s) I0724 22:05:22.090060 126494 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj" I0724 22:05:22.090183 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:22.090233 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl label nodes minikube.k8s.io/version=v1.12.1 minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf minikube.k8s.io/name=custom-weave-20200724220248-14997 minikube.k8s.io/updated_at=2020_07_24T22_05_22_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:22.108883 126494 ops.go:35] apiserver oom_adj: -16 I0724 22:05:22.282952 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:22.892860 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:23.392855 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:23.892877 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:24.392817 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:25.392875 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:25.892909 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:26.393048 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:26.892860 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:27.392911 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:27.892893 126494 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:28.038174 126494 kubeadm.go:866] duration metric: took 5.948088473s to wait for elevateKubeSystemPrivileges. 
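The run of near-identical "kubectl get sa default" entries above is a poll: the default service account only exists once the controller manager's token controller has run, so the bootstrapper retries on a roughly half-second interval until it appears (about 5.9s in this run). A compact local equivalent (a sketch; minikube actually drives kubectl on the node through its SSH runner):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		err := exec.Command("kubectl", "--kubeconfig", "/var/lib/minikube/kubeconfig",
			"get", "sa", "default").Run()
		if err == nil {
			fmt.Println("default service account exists")
			return
		}
		time.Sleep(500 * time.Millisecond) // matches the ~0.5s spacing of the retries above
	}
	fmt.Println("timed out waiting for default service account")
}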
I0724 22:05:28.038198 126494 kubeadm.go:329] StartCluster complete in 58.930425404s I0724 22:05:28.038219 126494 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:28.038337 126494 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig I0724 22:05:28.040639 126494 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:28.040844 126494 start.go:195] Will wait wait-timeout for node ... I0724 22:05:28.040989 126494 addons.go:353] enableAddons start: toEnable=map[], additional=[] I0724 22:05:28.060799 126494 addons.go:53] Setting storage-provisioner=true in profile "custom-weave-20200724220248-14997" I0724 22:05:28.041055 126494 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl scale deployment --replicas=1 coredns -n=kube-system I0724 22:05:28.060939 126494 addons.go:53] Setting default-storageclass=true in profile "custom-weave-20200724220248-14997" I0724 22:05:28.060954 126494 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "custom-weave-20200724220248-14997" I0724 22:05:28.061439 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Status}} I0724 22:05:28.062063 126494 addons.go:129] Setting addon storage-provisioner=true in "custom-weave-20200724220248-14997" W0724 22:05:28.062085 126494 addons.go:138] addon storage-provisioner should already be in state true I0724 22:05:28.062103 126494 host.go:65] Checking if "custom-weave-20200724220248-14997" exists ... I0724 22:05:28.062790 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Status}} I0724 22:05:28.065651 126494 api_server.go:48] waiting for apiserver process to appear ... I0724 22:05:28.065707 126494 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.* I0724 22:05:28.132860 126494 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:05:28.132890 126494 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes) I0724 22:05:28.132960 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:05:28.156480 126494 addons.go:129] Setting addon default-storageclass=true in "custom-weave-20200724220248-14997" W0724 22:05:28.156507 126494 addons.go:138] addon default-storageclass should already be in state true I0724 22:05:28.156521 126494 host.go:65] Checking if "custom-weave-20200724220248-14997" exists ... I0724 22:05:28.157075 126494 cli_runner.go:109] Run: docker container inspect custom-weave-20200724220248-14997 --format={{.State.Status}} I0724 22:05:28.202146 126494 start.go:549] successfully scaled coredns replicas to 1 I0724 22:05:28.202203 126494 api_server.go:68] duration metric: took 161.337824ms to wait for apiserver process to appear ... I0724 22:05:28.202229 126494 api_server.go:84] waiting for apiserver healthz status ... I0724 22:05:28.202242 126494 api_server.go:221] Checking apiserver healthz at https://172.17.0.8:8443/healthz ... 
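The healthz wait above issues HTTPS GETs against https://172.17.0.8:8443/healthz until the apiserver answers. A minimal version of that probe; certificate verification is skipped here purely to keep the sketch short, whereas the real client trusts the generated minikubeCA instead:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second,
		// Illustrative shortcut: a proper client would load /var/lib/minikube/certs/ca.crt
		// into a cert pool instead of disabling verification.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get("https://172.17.0.8:8443/healthz")
	if err != nil {
		fmt.Println("healthz not reachable:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("healthz status:", resp.StatusCode) // the log records "returned 200: ok"
}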
I0724 22:05:28.213616 126494 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32864 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa Username:docker} I0724 22:05:28.218569 126494 api_server.go:241] https://172.17.0.8:8443/healthz returned 200: ok I0724 22:05:28.219491 126494 api_server.go:137] control plane version: v1.18.3 I0724 22:05:28.219517 126494 api_server.go:127] duration metric: took 17.278402ms to wait for apiserver health ... I0724 22:05:28.219534 126494 system_pods.go:43] waiting for kube-system pods to appear ... I0724 22:05:28.230933 126494 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml I0724 22:05:28.230969 126494 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) I0724 22:05:28.231043 126494 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" custom-weave-20200724220248-14997 I0724 22:05:28.248954 126494 system_pods.go:59] 4 kube-system pods found I0724 22:05:28.249045 126494 system_pods.go:61] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:28.249080 126494 system_pods.go:61] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:28.249103 126494 system_pods.go:61] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:28.249126 126494 system_pods.go:61] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:28.249163 126494 system_pods.go:74] duration metric: took 29.60586ms to wait for pod list to return data ... I0724 22:05:28.249188 126494 default_sa.go:33] waiting for default service account to be created ... I0724 22:05:28.269403 126494 default_sa.go:44] found service account: "default" I0724 22:05:28.269433 126494 default_sa.go:54] duration metric: took 20.220806ms for default service account to be created ... I0724 22:05:28.269447 126494 system_pods.go:116] waiting for k8s-apps to be running ... 
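"waiting for k8s-apps to be running" amounts to listing kube-system pods and comparing their phases against a required component set (kube-dns/coredns, kube-proxy, and the static control-plane pods); the name/UID/phase tuples the system_pods entries print are exactly that listing. A client-go sketch of the same check, assuming a kubeconfig path (not minikube's exact code):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		// The log prints exactly this tuple: pod name, UID, and phase.
		fmt.Printf("%q [%s] %s\n", p.Name, p.UID, p.Status.Phase)
	}
}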
I0724 22:05:28.284470 126494 system_pods.go:86] 6 kube-system pods found I0724 22:05:28.284502 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:28.284510 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:28.284515 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:28.284521 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending I0724 22:05:28.284526 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:28.284531 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending I0724 22:05:28.284549 126494 retry.go:30] will retry after 263.082536ms: missing components: kube-dns, kube-proxy I0724 22:05:28.318652 126494 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32864 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/custom-weave-20200724220248-14997/id_rsa Username:docker} I0724 22:05:28.411993 126494 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:05:28.500045 126494 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml I0724 22:05:28.554311 126494 system_pods.go:86] 7 kube-system pods found I0724 22:05:28.554354 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending I0724 22:05:28.554367 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:28.554382 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:28.554396 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:28.554426 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:28.554449 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:28.554477 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:28.554496 126494 retry.go:30] will retry after 381.329545ms: missing components: kube-dns, kube-proxy I0724 22:05:31.034442 126494 system_pods.go:86] 7 kube-system pods found I0724 22:05:31.034475 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:31.034486 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running 
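The retry.go entries threaded through these listings record a wait that grows on each attempt (263ms, 381ms, 422ms, and eventually several seconds later in the log) with some jitter, so the apiserver is not hammered while pods come up. A small sketch of that kind of jittered, roughly exponential backoff (the constants are illustrative, not minikube's exact retry parameters):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func componentsReady() bool { return false } // stand-in for the kube-system pod check

func main() {
	wait := 250 * time.Millisecond
	for attempt := 1; attempt <= 10; attempt++ {
		if componentsReady() {
			fmt.Println("all components running")
			return
		}
		// Grow the interval ~1.3x per attempt and add up to 20% jitter,
		// producing a sequence like the "will retry after ..." lines above.
		jitter := time.Duration(rand.Int63n(int64(wait) / 5))
		fmt.Printf("will retry after %s\n", wait+jitter)
		time.Sleep(wait + jitter)
		wait = wait * 13 / 10
	}
	fmt.Println("gave up waiting for components")
}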
I0724 22:05:31.034498 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:31.034518 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:31.034528 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:31.034537 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:31.034548 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:31.034561 126494 retry.go:30] will retry after 422.765636ms: missing components: kube-dns, kube-proxy I0724 22:05:39.186842 126494 system_pods.go:86] 7 kube-system pods found I0724 22:05:39.186886 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:39.186898 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:39.186912 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:39.186922 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:39.186934 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:39.186947 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:39.186980 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:39.187010 126494 retry.go:30] will retry after 473.074753ms: missing components: kube-dns, kube-proxy I0724 22:05:39.445352 126494 ssh_runner.go:188] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (11.033326547s) W0724 22:05:39.445386 126494 addons.go:257] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1 stdout: serviceaccount/storage-provisioner created clusterrolebinding.rbac.authorization.k8s.io/storage-provisioner created rolebinding.rbac.authorization.k8s.io/leader-locking-storage-provisioner created endpoints/k8s.io-minikube-hostpath created pod/storage-provisioner created stderr: Error from server: error when retrieving current configuration of: Resource: 
"rbac.authorization.k8s.io/v1, Resource=roles", GroupVersionKind: "rbac.authorization.k8s.io/v1, Kind=Role" Name: "system::leader-locking-storage-provisioner", Namespace: "kube-system" from server for: "/etc/kubernetes/addons/storage-provisioner.yaml": etcdserver: request timed out I0724 22:05:39.445396 126494 retry.go:30] will retry after 231.159374ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1 stdout: serviceaccount/storage-provisioner created clusterrolebinding.rbac.authorization.k8s.io/storage-provisioner created rolebinding.rbac.authorization.k8s.io/leader-locking-storage-provisioner created endpoints/k8s.io-minikube-hostpath created pod/storage-provisioner created stderr: Error from server: error when retrieving current configuration of: Resource: "rbac.authorization.k8s.io/v1, Resource=roles", GroupVersionKind: "rbac.authorization.k8s.io/v1, Kind=Role" Name: "system::leader-locking-storage-provisioner", Namespace: "kube-system" from server for: "/etc/kubernetes/addons/storage-provisioner.yaml": etcdserver: request timed out I0724 22:05:39.445454 126494 ssh_runner.go:188] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (10.945370028s) W0724 22:05:39.445491 126494 addons.go:257] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1 stdout: stderr: Error from server: error when creating "/etc/kubernetes/addons/storageclass.yaml": etcdserver: request timed out I0724 22:05:39.445510 126494 retry.go:30] will retry after 296.705768ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1 stdout: stderr: Error from server: error when creating "/etc/kubernetes/addons/storageclass.yaml": etcdserver: request timed out I0724 22:05:39.667900 126494 system_pods.go:86] 8 kube-system pods found I0724 22:05:39.667945 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:39.667955 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:39.667965 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:39.667975 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:39.667985 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:39.667994 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:39.668012 126494 system_pods.go:89] "storage-provisioner" [8ce030ba-9435-404a-98ab-0e5fffd82cea] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / 
ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:39.668030 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:39.668044 126494 retry.go:30] will retry after 477.7945ms: missing components: kube-dns, kube-proxy I0724 22:05:39.676736 126494 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:05:39.742412 126494 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml I0724 22:05:40.152444 126494 system_pods.go:86] 8 kube-system pods found I0724 22:05:40.152495 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:40.152505 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:40.152520 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:40.152529 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:40.152539 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:40.152634 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:40.152657 126494 system_pods.go:89] "storage-provisioner" [8ce030ba-9435-404a-98ab-0e5fffd82cea] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:40.152671 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:40.152694 126494 retry.go:30] will retry after 631.911946ms: missing components: kube-dns, kube-proxy I0724 22:05:40.344861 126494 addons.go:355] enableAddons completed in 12.30388088s I0724 22:05:40.790062 126494 system_pods.go:86] 8 kube-system pods found I0724 22:05:40.790094 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:40.790101 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:40.790109 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:40.790115 126494 system_pods.go:89] 
"kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:40.790121 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:40.790127 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:40.790135 126494 system_pods.go:89] "storage-provisioner" [8ce030ba-9435-404a-98ab-0e5fffd82cea] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:40.790142 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:40.790162 126494 retry.go:30] will retry after 761.494406ms: missing components: kube-dns, kube-proxy I0724 22:05:41.662861 126494 system_pods.go:86] 8 kube-system pods found I0724 22:05:41.662895 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:41.662904 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running I0724 22:05:41.662912 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running I0724 22:05:41.662918 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running I0724 22:05:41.662924 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:05:41.662930 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running I0724 22:05:41.662937 126494 system_pods.go:89] "storage-provisioner" [8ce030ba-9435-404a-98ab-0e5fffd82cea] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:05:41.662955 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc]) I0724 22:05:41.662965 126494 retry.go:30] will retry after 1.073427115s: missing components: kube-dns, kube-proxy I0724 22:05:42.744183 126494 system_pods.go:86] 8 kube-system pods found I0724 22:05:42.744230 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:05:42.744241 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" 
[b6be47cc-382b-4bab-8e66-0482a5947f89] Running
I0724 22:05:42.744254 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running
I0724 22:05:42.744264 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running
I0724 22:05:42.744272 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Running
I0724 22:05:42.744281 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running
I0724 22:05:42.744303 126494 system_pods.go:89] "storage-provisioner" [8ce030ba-9435-404a-98ab-0e5fffd82cea] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:05:42.744314 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Pending / Ready:ContainersNotReady (containers with unready status: [weave weave-npc]) / ContainersReady:ContainersNotReady (containers with unready status: [weave weave-npc])
I0724 22:05:42.744396 126494 retry.go:30] will retry after 1.501450407s: missing components: kube-dns
[... 38 near-identical polls from 22:05:44 to 22:30:11 omitted: the same 8 kube-system pods each time, with "coredns-66bff467f8-c6xtm" Pending (coredns container unready) throughout, storage-provisioner and weave-net soon reporting Running (weave container still unready), and the retry.go backoff growing from 1.5s through ~30-60s to 1m14s ...]
I0724 22:31:19.740222 126494 system_pods.go:86] 8 kube-system pods found
I0724 22:31:19.740261 126494 system_pods.go:89] "coredns-66bff467f8-c6xtm" [404c9846-5599-483c-a9ee-4ce0c790d832] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:31:19.740269 126494 system_pods.go:89] "etcd-custom-weave-20200724220248-14997" [b6be47cc-382b-4bab-8e66-0482a5947f89] Running
I0724 22:31:19.740279 126494 system_pods.go:89] "kube-apiserver-custom-weave-20200724220248-14997" [259ce929-c39c-46d2-94cf-a26980082767] Running
I0724 22:31:19.740285 126494 system_pods.go:89] "kube-controller-manager-custom-weave-20200724220248-14997" [7d3fd7da-e02f-4447-a202-858783b88e61] Running
I0724 22:31:19.740291 126494 system_pods.go:89] "kube-proxy-4fxmn" [d27e15e4-b21a-41ab-98fc-172c594abd85] Running
I0724 22:31:19.740297 126494 system_pods.go:89] "kube-scheduler-custom-weave-20200724220248-14997" [d2064a1a-0e7e-4eed-8632-d4aacb0cedf6] Running
I0724 22:31:19.740305 126494 system_pods.go:89] "storage-provisioner" [8ce030ba-9435-404a-98ab-0e5fffd82cea] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:31:19.740402 126494 system_pods.go:89] "weave-net-w4lvx" [fda79fe7-76fb-4fbb-b246-1da694a2b942] Running / Ready:ContainersNotReady (containers with unready status: [weave]) / ContainersReady:ContainersNotReady (containers with unready status: [weave])
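The retry.go:30 lines above are a poll-with-backoff loop: after each failed readiness check the wait grows roughly geometrically (1.5s, 2.1s, ... capped near one minute, with jitter) until the overall 25m0s node-wait deadline is hit. Below is a minimal standalone sketch of that pattern in Go; the pollWithBackoff name, 1.5x growth factor, and one-minute cap are illustrative assumptions, not minikube's actual retry implementation.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// pollWithBackoff re-runs check until it succeeds or the deadline passes,
// sleeping a growing, jittered interval between attempts -- the same shape
// as the retry.go lines in the log above. (Sketch only, not minikube code.)
func pollWithBackoff(timeout time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	wait := 1500 * time.Millisecond
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v: %w", timeout, err)
		}
		// +/-25% jitter keeps concurrent pollers from synchronizing.
		jitter := time.Duration(rand.Int63n(int64(wait/2))) - wait/4
		fmt.Printf("will retry after %v: %v\n", wait+jitter, err)
		time.Sleep(wait + jitter)
		if wait < time.Minute {
			wait = wait * 3 / 2 // grow ~1.5x per attempt
		}
	}
}

func main() {
	attempts := 0
	err := pollWithBackoff(10*time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("missing components: kube-dns")
		}
		return nil
	})
	fmt.Println("result:", err)
}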
node)=startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from: goroutine 1 [running]: runtime/debug.Stack(0x0, 0x0, 0x100000000000000) /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc0000f3400) /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34 k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc000266370, 0x2, 0xb) /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505 github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc0002662c0, 0xb, 0xb, 0x2cd0820, 0xc0002662c0) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc0000428c0) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349 github.com/spf13/cobra.(*Command).Execute(...) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887 k8s.io/minikube/cmd/minikube/cmd.Execute() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c main.main() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f W0724 22:31:19.740642 126494 out.go:249] failed to start node: startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * X failed to start node: startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * * minikube is exiting due to an error. If the above message is not useful, open an issue: - https://github.com/kubernetes/minikube/issues/new/choose ** /stderr ** net_test.go:82: failed start: exit status 70 === CONT TestNetworkPlugins/group/custom-weave net_test.go:147: skipping remaining tests for weave, as results can be unpredictable panic.go:617: *** TestNetworkPlugins/group/custom-weave FAILED at 2020-07-24 22:31:19.768681851 +0000 UTC m=+3309.893046236 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestNetworkPlugins/group/custom-weave]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect custom-weave-20200724220248-14997 helpers_test.go:228: (dbg) docker inspect custom-weave-20200724220248-14997: -- stdout -- [ { "Id": "3ae847ad86e84b8b39ba0f04e9a313a9e687698ce4a2d0a88f9a73e43caa629f", "Created": "2020-07-24T22:04:10.063601165Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 145695, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:04:11.042402598Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/3ae847ad86e84b8b39ba0f04e9a313a9e687698ce4a2d0a88f9a73e43caa629f/resolv.conf", "HostnamePath": "/var/lib/docker/containers/3ae847ad86e84b8b39ba0f04e9a313a9e687698ce4a2d0a88f9a73e43caa629f/hostname", "HostsPath": "/var/lib/docker/containers/3ae847ad86e84b8b39ba0f04e9a313a9e687698ce4a2d0a88f9a73e43caa629f/hosts", "LogPath": "/var/lib/docker/containers/3ae847ad86e84b8b39ba0f04e9a313a9e687698ce4a2d0a88f9a73e43caa629f/3ae847ad86e84b8b39ba0f04e9a313a9e687698ce4a2d0a88f9a73e43caa629f-json.log", "Name": 
"/custom-weave-20200724220248-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "custom-weave-20200724220248-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/1a75bdf697bc1076fe5f0311de103e14408fb6575d4cbd65eb932016f5dfd223-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/1a75bdf697bc1076fe5f0311de103e14408fb6575d4cbd65eb932016f5dfd223/merged", "UpperDir": "/var/lib/docker/overlay2/1a75bdf697bc1076fe5f0311de103e14408fb6575d4cbd65eb932016f5dfd223/diff", "WorkDir": "/var/lib/docker/overlay2/1a75bdf697bc1076fe5f0311de103e14408fb6575d4cbd65eb932016f5dfd223/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "custom-weave-20200724220248-14997", "Source": "/var/lib/docker/volumes/custom-weave-20200724220248-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "custom-weave-20200724220248-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, 
"Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "custom-weave-20200724220248-14997", "name.minikube.sigs.k8s.io": "custom-weave-20200724220248-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "d820646207a744b2f8b6b80a142fe6dad4bbcb584646402d7180e94d84c958df", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32864" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32863" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32862" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32861" } ] }, "SandboxKey": "/var/run/docker/netns/d820646207a7", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "fd0a03ce40a5729f46cbccfc155ce4fdaee6afa662982ce13c4a0e92991baf9a", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.8", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:08", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "fd0a03ce40a5729f46cbccfc155ce4fdaee6afa662982ce13c4a0e92991baf9a", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.8", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:08", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p custom-weave-20200724220248-14997 -n custom-weave-20200724220248-14997 helpers_test.go:237: <<< TestNetworkPlugins/group/custom-weave FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/custom-weave]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p custom-weave-20200724220248-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p custom-weave-20200724220248-14997 logs -n 25: (2.326797769s) helpers_test.go:245: TestNetworkPlugins/group/custom-weave logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:04:20 UTC, end at Fri 2020-07-24 22:31:20 UTC. 
-- * Jul 24 22:29:55 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:29:55.662835091Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:29:58 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:29:58.752459893Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:01 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:01.676026789Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:04 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:04.622284961Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:07 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:07.617716960Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:11 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:11.453874610Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:14 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:14.820977127Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:17 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:17.957737650Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:21 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:21.114866826Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:24 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:24.254430368Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:26 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:26.694424459Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:30 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:30.199508926Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:33 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:33.159158366Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:36 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:36.478425335Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:41 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:41.777182345Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:45 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:45.430720056Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:48 custom-weave-20200724220248-14997 
dockerd[353]: time="2020-07-24T22:30:48.930059171Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:52 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:52.295158249Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:55 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:30:55.591240437Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:03 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:31:03.220646946Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:06 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:31:06.815784120Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:10 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:31:10.131579577Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:13 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:31:13.540985264Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:16 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:31:16.536682792Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:19 custom-weave-20200724220248-14997 dockerd[353]: time="2020-07-24T22:31:19.242537049Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 7bfa5387aaa34 e9dd2f85e51b4 3 minutes ago Exited weave 9 7f0f164f9b3d7 * 94dc1eeda59e9 4689081edb103 4 minutes ago Exited storage-provisioner 9 72f3f6a30a3bc * fbb15e90847fb weaveworks/weave-npc@sha256:0f6166e000faa500ccc0df53caae17edd3110590b7b159007a5ea727cdfb1cef 25 minutes ago Running weave-npc 0 7f0f164f9b3d7 * d80d24d8fbb8b 3439b7546f29b 25 minutes ago Running kube-proxy 0 b2d08cb8d40d8 * 6016deaeb8804 da26705ccb4b5 26 minutes ago Running kube-controller-manager 1 de29956c73cd3 * d6380752cf972 da26705ccb4b5 26 minutes ago Exited kube-controller-manager 0 de29956c73cd3 * af5fdbf8455c2 303ce5db0e90d 26 minutes ago Running etcd 0 a375e59c7a690 * 9be6e902897f5 76216c34ed0c7 26 minutes ago Running kube-scheduler 0 2ee99f550df93 * c9086894e304a 7e28efa976bd1 26 minutes ago Running kube-apiserver 0 682b632fddb5d * * ==> describe nodes <== * Name: custom-weave-20200724220248-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=custom-weave-20200724220248-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=custom-weave-20200724220248-14997 * minikube.k8s.io/updated_at=2020_07_24T22_05_22_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:04:53 
+0000 * Taints: <none> * Unschedulable: false * Lease: * HolderIdentity: custom-weave-20200724220248-14997 * AcquireTime: <unset> * RenewTime: Fri, 24 Jul 2020 22:31:20 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:31:13 +0000 Fri, 24 Jul 2020 22:04:53 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:31:13 +0000 Fri, 24 Jul 2020 22:04:53 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:31:13 +0000 Fri, 24 Jul 2020 22:04:53 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:31:13 +0000 Fri, 24 Jul 2020 22:04:57 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.8 * Hostname: custom-weave-20200724220248-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 15b8d19de6b6459492c027f3a29aecd6 * System UUID: c551b473-d4d4-40ea-90a3-4bc9a5ca6d2d * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: docker://19.3.2 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (8 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * kube-system coredns-66bff467f8-c6xtm 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 25m * kube-system etcd-custom-weave-20200724220248-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system kube-apiserver-custom-weave-20200724220248-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system kube-controller-manager-custom-weave-20200724220248-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system kube-proxy-4fxmn 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system kube-scheduler-custom-weave-20200724220248-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 26m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system weave-net-w4lvx 20m (0%) 0 (0%) 0 (0%) 0 (0%) 25m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits * -------- -------- ------ * cpu 670m (4%) 0 (0%) * memory 70Mi (0%) 170Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 26m (x7 over 26m) kubelet, custom-weave-20200724220248-14997 Node custom-weave-20200724220248-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 26m (x7 over 26m) kubelet, custom-weave-20200724220248-14997 Node custom-weave-20200724220248-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 26m (x7 over 26m) kubelet, custom-weave-20200724220248-14997 Node custom-weave-20200724220248-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 26m kubelet, custom-weave-20200724220248-14997 Updated Node Allocatable limit across pods * Warning SystemOOM 26m kubelet, custom-weave-20200724220248-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Warning SystemOOM 26m kubelet, custom-weave-20200724220248-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Normal Starting 26m kubelet, custom-weave-20200724220248-14997 Starting kubelet. * Normal NodeHasSufficientMemory 26m kubelet, custom-weave-20200724220248-14997 Node custom-weave-20200724220248-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 26m kubelet, custom-weave-20200724220248-14997 Node custom-weave-20200724220248-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 26m kubelet, custom-weave-20200724220248-14997 Node custom-weave-20200724220248-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 26m kubelet, custom-weave-20200724220248-14997 Updated Node Allocatable limit across pods * Warning readOnlySysFS 25m kube-proxy, custom-weave-20200724220248-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kube-proxy, custom-weave-20200724220248-14997 Starting kube-proxy. 
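[Editor's annotation] Reading the describe output above together with the retry loop at the top of this log: the node is Ready and every control-plane pod is Running, but coredns-66bff467f8-c6xtm has been Pending for 25 minutes, so the wait keeps ending in "missing components: kube-dns". For orientation, a minimal client-go sketch of that style of readiness poll follows. It is an illustration, not minikube's actual system_pods.go; the coredns name prefix, the fixed sleep, and the default kubeconfig path are assumptions.

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForKubeDNS polls kube-system until a coredns pod reports phase
// Running, or the timeout expires. A sketch of the check whose output
// appears above ("8 kube-system pods found" / "missing components:
// kube-dns"); minikube's real check also inspects container Ready
// conditions, which is why pods above print
// "Running / Ready:ContainersNotReady".
func waitForKubeDNS(cs *kubernetes.Clientset, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
		if err == nil {
			for _, p := range pods.Items {
				if strings.HasPrefix(p.Name, "coredns") && p.Status.Phase == corev1.PodRunning {
					return nil
				}
			}
		}
		time.Sleep(10 * time.Second) // the real loop (retry.go above) uses jittered backoff
	}
	return fmt.Errorf("missing components: kube-dns")
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(waitForKubeDNS(cs, 25*time.Minute))
}

The Pending phase is a symptom, not the cause: the kubelet section further down shows the sandbox for this same coredns pod repeatedly failing CNI setup.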
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [af5fdbf8455c] <== * 2020-07-24 22:28:16.220449 W | wal: sync duration of 11.109995543s, expected less than 1s * 2020-07-24 22:28:16.522972 W | etcdserver: request "header: txn: success:> failure:<>>" with result "size:16" took too long (302.243378ms) to execute * 2020-07-24 22:28:16.523523 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (6.160618462s) to execute * 2020-07-24 22:28:16.523617 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (8.501022605s) to execute * 2020-07-24 22:28:16.523709 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/etcd-custom-weave-20200724220248-14997.1624cf802d5750ca\" " with result "range_response_count:1 size:853" took too long (9.102530008s) to execute * 2020-07-24 22:28:16.523762 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy\" range_end:\"/registry/podsecuritypolicz\" count_only:true " with result "range_response_count:0 size:5" took too long (10.313489869s) to execute * 2020-07-24 22:28:16.523851 W | etcdserver: read-only range request "key:\"/registry/volumeattachments\" range_end:\"/registry/volumeattachmentt\" count_only:true " with result "range_response_count:0 size:5" took too long (7.045912935s) to execute * 2020-07-24 22:28:16.579602 W | etcdserver: read-only range request "key:\"/registry/services/endpoints\" range_end:\"/registry/services/endpointt\" count_only:true " with result "range_response_count:0 size:7" took too long (160.687405ms) to execute * 2020-07-24 22:28:16.579696 W | etcdserver: read-only range 
request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (1.472269773s) to execute * 2020-07-24 22:28:19.453122 W | wal: sync duration of 2.873637701s, expected less than 1s * 2020-07-24 22:28:20.053339 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context canceled" took too long (1.999992675s) to execute * WARNING: 2020/07/24 22:28:20 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing" * 2020-07-24 22:28:20.664880 W | etcdserver: read-only range request "key:\"/registry/cronjobs/\" range_end:\"/registry/cronjobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (4.13893314s) to execute * 2020-07-24 22:28:20.664985 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/custom-weave-20200724220248-14997\" " with result "range_response_count:1 size:700" took too long (4.138059979s) to execute * 2020-07-24 22:28:20.665007 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (1.211648397s) to execute * 2020-07-24 22:28:20.786602 W | wal: sync duration of 1.332201005s, expected less than 1s * 2020-07-24 22:28:20.853736 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:610" took too long (4.272471352s) to execute * 2020-07-24 22:28:20.854031 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (1.195873898s) to execute * 2020-07-24 22:28:20.854187 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-apiserver-custom-weave-20200724220248-14997.1624cf80ca55b9b8\" " with result "range_response_count:1 size:894" took too long (186.828429ms) to execute * 2020-07-24 22:29:48.187817 I | mvcc: store.index: compact 766 * 2020-07-24 22:29:48.188402 I | mvcc: finished scheduled compaction at 766 (took 276.219µs) * 2020-07-24 22:31:00.956723 W | etcdserver: read-only range request "key:\"/registry/replicasets\" range_end:\"/registry/replicasett\" count_only:true " with result "range_response_count:0 size:7" took too long (309.141914ms) to execute * 2020-07-24 22:31:00.956885 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (221.283112ms) to execute * 2020-07-24 22:31:02.985930 W | etcdserver: read-only range request "key:\"/registry/pods\" range_end:\"/registry/podt\" count_only:true " with result "range_response_count:0 size:7" took too long (936.267848ms) to execute * 2020-07-24 22:31:02.985978 W | etcdserver: read-only range request "key:\"/registry/services/endpoints\" range_end:\"/registry/services/endpointt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.420081501s) to execute * * ==> kernel <== * 22:31:22 up 58 min, 0 users, load average: 7.22, 8.22, 8.10 * Linux custom-weave-20200724220248-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [c9086894e304] <== * Trace[1144677032]: [7.003405897s] [7.00331419s] END * I0724 22:28:16.524261 1 trace.go:116] Trace[180796793]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 
22:28:08.022136185 +0000 UTC m=+1401.181608999) (total time: 8.502085579s): * Trace[180796793]: [8.502085579s] [8.502085579s] END * I0724 22:28:16.524438 1 trace.go:116] Trace[188754999]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.8 (started: 2020-07-24 22:28:08.022111684 +0000 UTC m=+1401.181584498) (total time: 8.502300993s): * Trace[188754999]: [8.502175184s] [8.502156483s] Listing from storage done * I0724 22:28:16.525345 1 trace.go:116] Trace[586641096]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:28:14.108001462 +0000 UTC m=+1407.267474276) (total time: 2.417301577s): * Trace[586641096]: [2.415722567s] [2.414646092s] Transaction committed * I0724 22:28:16.525493 1 trace.go:116] Trace[744612478]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/custom-weave-20200724220248-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.8 (started: 2020-07-24 22:28:14.107877653 +0000 UTC m=+1407.267350567) (total time: 2.417575597s): * Trace[744612478]: [2.417575597s] [2.417491791s] END * I0724 22:28:16.580184 1 trace.go:116] Trace[887389300]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:28:15.106867021 +0000 UTC m=+1408.266339935) (total time: 1.473282843s): * Trace[887389300]: [1.47324414s] [1.47323424s] About to write a response * I0724 22:28:20.665440 1 trace.go:116] Trace[530009283]: "List etcd3" key:/cronjobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:28:16.525651163 +0000 UTC m=+1409.685123977) (total time: 4.139752798s): * Trace[530009283]: [4.139752798s] [4.139752798s] END * I0724 22:28:20.665452 1 trace.go:116] Trace[417329854]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:28:07.42074699 +0000 UTC m=+1400.580219804) (total time: 13.244671572s): * Trace[417329854]: [9.103643486s] [9.103643486s] initial value restored * Trace[417329854]: [13.24464077s] [4.085374705s] Transaction committed * I0724 22:28:20.665522 1 trace.go:116] Trace[773867459]: "Get" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/custom-weave-20200724220248-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.8 (started: 2020-07-24 22:28:16.52660043 +0000 UTC m=+1409.686073244) (total time: 4.138887737s): * Trace[773867459]: [4.138820532s] [4.138805331s] About to write a response * I0724 22:28:20.665539 1 trace.go:116] Trace[1685735]: "Patch" url:/api/v1/namespaces/kube-system/events/etcd-custom-weave-20200724220248-14997.1624cf802d5750ca,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.8 (started: 2020-07-24 22:28:07.420662084 +0000 UTC m=+1400.580134898) (total time: 13.244856685s): * Trace[1685735]: [9.103730092s] [9.10369749s] About to apply patch * Trace[1685735]: [13.244812482s] [4.140571855s] Object stored in database * I0724 22:28:20.665550 1 trace.go:116] Trace[1924924319]: "List" url:/apis/batch/v1beta1/cronjobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.8 (started: 2020-07-24 22:28:16.525633962 +0000 UTC m=+1409.685106776) (total time: 4.139887107s): * Trace[1924924319]: [4.139829403s] [4.139816702s] Listing from storage done * I0724 22:28:20.854343 1 trace.go:116] Trace[1383351247]: 
"Get" url:/api/v1/namespaces/default/services/kubernetes,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:28:16.580989123 +0000 UTC m=+1409.740462037) (total time: 4.273307511s): * Trace[1383351247]: [4.273241106s] [4.273232906s] About to write a response * * ==> kube-controller-manager [6016deaeb880] <== * I0724 22:05:28.243298 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"weave-net", UID:"5256c6ba-7b4e-483e-ab16-9cabbcf2d799", APIVersion:"apps/v1", ResourceVersion:"246", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: weave-net-w4lvx * I0724 22:05:28.244139 1 shared_informer.go:230] Caches are synced for ClusterRoleAggregator * I0724 22:05:28.244516 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"eb393c50-a0f6-46c1-bc45-8c4682512dc9", APIVersion:"apps/v1", ResourceVersion:"213", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-4fxmn * I0724 22:05:28.244756 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:05:28.267350 1 shared_informer.go:230] Caches are synced for TTL * I0724 22:05:28.285048 1 shared_informer.go:230] Caches are synced for GC * I0724 22:05:28.294394 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:05:28.373088 1 shared_informer.go:230] Caches are synced for taint * I0724 22:05:28.373204 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:05:28.373397 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * W0724 22:05:28.373502 1 node_lifecycle_controller.go:1048] Missing timestamp for Node custom-weave-20200724220248-14997. Assuming now as a timestamp. * I0724 22:05:28.373552 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:05:28.373616 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"custom-weave-20200724220248-14997", UID:"e6f3ea97-399a-42ff-a5a1-224eeb7fc814", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node custom-weave-20200724220248-14997 event: Registered Node custom-weave-20200724220248-14997 in Controller * I0724 22:05:28.394931 1 shared_informer.go:230] Caches are synced for deployment * I0724 22:05:28.398040 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:05:28.409434 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"9fed05d2-cb1a-4ecc-9c81-0b1cc447ae6a", APIVersion:"apps/v1", ResourceVersion:"336", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set coredns-66bff467f8 to 1 * I0724 22:05:28.435543 1 shared_informer.go:230] Caches are synced for ReplicaSet * I0724 22:05:28.439599 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:05:28.439624 1 disruption.go:339] Sending events to api server. * I0724 22:05:28.451665 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"bbc553a4-35e6-4e2b-8d5c-5e3083ccf256", APIVersion:"apps/v1", ResourceVersion:"360", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-c6xtm * I0724 22:05:28.492478 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:05:28.492505 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * I0724 22:05:28.535605 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:05:28.535687 1 shared_informer.go:230] Caches are synced for garbage collector * E0724 22:05:38.034150 1 tokens_controller.go:261] error synchronizing serviceaccount kube-system/storage-provisioner: etcdserver: request timed out * * ==> kube-controller-manager [d6380752cf97] <== * I0724 22:04:48.054380 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:04:49.049100 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:04:49.050399 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:04:49.051256 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * I0724 22:04:49.051964 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:04:49.052024 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:04:49.052143 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * F0724 22:05:02.529428 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: an error on the server ("[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/start-kube-apiserver-admission-initializer ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/start-apiextensions-informers ok\n[+]poststarthook/start-apiextensions-controllers ok\n[+]poststarthook/crd-informer-synced ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/start-cluster-authentication-info-controller ok\n[+]poststarthook/start-kube-aggregator-informers ok\n[+]poststarthook/apiservice-registration-controller ok\n[+]poststarthook/apiservice-status-available-controller ok\n[+]poststarthook/kube-apiserver-autoregistration ok\n[+]autoregister-completion ok\n[+]poststarthook/apiservice-openapi-controller ok\nhealthz check failed") has prevented the request from succeeding * * ==> kube-proxy [d80d24d8fbb8] <== * W0724 22:05:41.176892 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:05:41.185034 1 node.go:136] Successfully retrieved node IP: 172.17.0.8 * I0724 22:05:41.185082 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:05:41.185386 1 server.go:583] Version: v1.18.3 * I0724 22:05:41.186109 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:05:41.186534 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:05:41.186815 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:05:41.186934 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:05:41.187173 1 config.go:315] Starting service config controller * I0724 22:05:41.187206 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:05:41.187263 1 config.go:133] Starting endpoints config controller * I0724 22:05:41.187290 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:05:41.287595 1 shared_informer.go:230] Caches are synced for endpoints config * I0724 22:05:41.287609 1 shared_informer.go:230] Caches are synced for service config * * ==> kube-scheduler [9be6e902897f] <== * E0724 22:04:52.237325 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:04:52.237361 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:52.237363 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:04:52.237655 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:04:52.237672 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:04:53.066012 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:04:53.217592 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:04:53.354700 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:04:53.481162 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace 
"kube-system" * E0724 22:04:53.534969 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:04:53.597641 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:04:53.630395 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:53.738907 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:53.759610 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:04:55.347034 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:55.508296 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:04:55.621904 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:04:56.076891 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:04:56.105741 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:04:56.277134 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:04:56.338152 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:04:56.625282 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:04:56.758359 1 
reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:05:02.327498 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * I0724 22:05:12.759723 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:04:20 UTC, end at Fri 2020-07-24 22:31:22 UTC. -- * Jul 24 22:31:16 custom-weave-20200724220248-14997 kubelet[2493]: Try `iptables -h' or 'iptables --help' for more information. * Jul 24 22:31:16 custom-weave-20200724220248-14997 kubelet[2493]: ] * Jul 24 22:31:16 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:16.605139 2493 pod_workers.go:191] Error syncing pod 404c9846-5599-483c-a9ee-4ce0c790d832 ("coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"620e3006f8faebc58e5bb25843516ab9e7115938110bc348eea6b5cd94636f84\" network for pod \"coredns-66bff467f8-c6xtm\": networkPlugin cni failed to set up pod \"coredns-66bff467f8-c6xtm_kube-system\" network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied, failed to clean up sandbox container \"620e3006f8faebc58e5bb25843516ab9e7115938110bc348eea6b5cd94636f84\" network for pod \"coredns-66bff467f8-c6xtm\": networkPlugin cni failed to teardown pod \"coredns-66bff467f8-c6xtm_kube-system\" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.172 -j CNI-df76c43296993f1ad9989206 -m comment --comment name: \"crio-bridge\" id: \"620e3006f8faebc58e5bb25843516ab9e7115938110bc348eea6b5cd94636f84\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-df76c43296993f1ad9989206':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n]" * Jul 24 22:31:16 custom-weave-20200724220248-14997 kubelet[2493]: W0724 22:31:16.955074 2493 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-c6xtm_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "620e3006f8faebc58e5bb25843516ab9e7115938110bc348eea6b5cd94636f84" * Jul 24 22:31:16 custom-weave-20200724220248-14997 kubelet[2493]: W0724 22:31:16.958907 2493 pod_container_deletor.go:77] Container "620e3006f8faebc58e5bb25843516ab9e7115938110bc348eea6b5cd94636f84" not found in pod's containers * Jul 24 22:31:16 custom-weave-20200724220248-14997 kubelet[2493]: W0724 22:31:16.960402 2493 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "620e3006f8faebc58e5bb25843516ab9e7115938110bc348eea6b5cd94636f84" * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.030192 2493 cni.go:364] Error 
adding kube-system_coredns-66bff467f8-c6xtm/ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae to network bridge/crio-bridge: failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: I0724 22:31:19.100970 2493 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 94dc1eeda59e9ac834db1da6eba19e63410f5764ab89aed80012e955245a099b * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.101300 2493 pod_workers.go:191] Error syncing pod 8ce030ba-9435-404a-98ab-0e5fffd82cea ("storage-provisioner_kube-system(8ce030ba-9435-404a-98ab-0e5fffd82cea)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(8ce030ba-9435-404a-98ab-0e5fffd82cea)" * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.116309 2493 cni.go:385] Error deleting kube-system_coredns-66bff467f8-c6xtm/ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae from network bridge/crio-bridge: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.173 -j CNI-49700d9ede56628cd1e8e047 -m comment --comment name: "crio-bridge" id: "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-49700d9ede56628cd1e8e047':No such file or directory * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: Try `iptables -h' or 'iptables --help' for more information. * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.309138 2493 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = [failed to set up sandbox container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" network for pod "coredns-66bff467f8-c6xtm": networkPlugin cni failed to set up pod "coredns-66bff467f8-c6xtm_kube-system" network: failed to set bridge addr: could not add IP address to "cni0": permission denied, failed to clean up sandbox container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" network for pod "coredns-66bff467f8-c6xtm": networkPlugin cni failed to teardown pod "coredns-66bff467f8-c6xtm_kube-system" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.173 -j CNI-49700d9ede56628cd1e8e047 -m comment --comment name: "crio-bridge" id: "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-49700d9ede56628cd1e8e047':No such file or directory * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: Try `iptables -h' or 'iptables --help' for more information. 
* Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: ] * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.309200 2493 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" network for pod "coredns-66bff467f8-c6xtm": networkPlugin cni failed to set up pod "coredns-66bff467f8-c6xtm_kube-system" network: failed to set bridge addr: could not add IP address to "cni0": permission denied, failed to clean up sandbox container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" network for pod "coredns-66bff467f8-c6xtm": networkPlugin cni failed to teardown pod "coredns-66bff467f8-c6xtm_kube-system" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.173 -j CNI-49700d9ede56628cd1e8e047 -m comment --comment name: "crio-bridge" id: "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-49700d9ede56628cd1e8e047':No such file or directory * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: Try `iptables -h' or 'iptables --help' for more information. * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: ] * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.309220 2493 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" network for pod "coredns-66bff467f8-c6xtm": networkPlugin cni failed to set up pod "coredns-66bff467f8-c6xtm_kube-system" network: failed to set bridge addr: could not add IP address to "cni0": permission denied, failed to clean up sandbox container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" network for pod "coredns-66bff467f8-c6xtm": networkPlugin cni failed to teardown pod "coredns-66bff467f8-c6xtm_kube-system" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.173 -j CNI-49700d9ede56628cd1e8e047 -m comment --comment name: "crio-bridge" id: "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-49700d9ede56628cd1e8e047':No such file or directory * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: Try `iptables -h' or 'iptables --help' for more information. 
* Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: ] * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:19.309298 2493 pod_workers.go:191] Error syncing pod 404c9846-5599-483c-a9ee-4ce0c790d832 ("coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-c6xtm_kube-system(404c9846-5599-483c-a9ee-4ce0c790d832)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae\" network for pod \"coredns-66bff467f8-c6xtm\": networkPlugin cni failed to set up pod \"coredns-66bff467f8-c6xtm_kube-system\" network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied, failed to clean up sandbox container \"ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae\" network for pod \"coredns-66bff467f8-c6xtm\": networkPlugin cni failed to teardown pod \"coredns-66bff467f8-c6xtm_kube-system\" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.173 -j CNI-49700d9ede56628cd1e8e047 -m comment --comment name: \"crio-bridge\" id: \"ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-49700d9ede56628cd1e8e047':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n]" * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: W0724 22:31:19.989693 2493 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-c6xtm_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: W0724 22:31:19.994740 2493 pod_container_deletor.go:77] Container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" not found in pod's containers * Jul 24 22:31:19 custom-weave-20200724220248-14997 kubelet[2493]: W0724 22:31:19.996239 2493 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "ab7f87fcc94a1419edacb70615497731875b4475325001d07cf097063e66f5ae" * Jul 24 22:31:22 custom-weave-20200724220248-14997 kubelet[2493]: E0724 22:31:22.414335 2493 cni.go:364] Error adding kube-system_coredns-66bff467f8-c6xtm/5d0fb45ad02736f8b81c780eb7ee294f759b4506fb82232a5af43e9c3a68c857 to network bridge/crio-bridge: failed to set bridge addr: could not add IP address to "cni0": permission denied * * ==> storage-provisioner [94dc1eeda59e] <== * F0724 22:26:53.516857 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p custom-weave-20200724220248-14997 -n custom-weave-20200724220248-14997 helpers_test.go:254: (dbg) Run: kubectl --context custom-weave-20200724220248-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: coredns-66bff467f8-c6xtm helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/custom-weave]: 
describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context custom-weave-20200724220248-14997 describe pod coredns-66bff467f8-c6xtm helpers_test.go:265: (dbg) Non-zero exit: kubectl --context custom-weave-20200724220248-14997 describe pod coredns-66bff467f8-c6xtm: exit status 1 (95.391765ms) ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-c6xtm" not found ** /stderr ** helpers_test.go:267: kubectl --context custom-weave-20200724220248-14997 describe pod coredns-66bff467f8-c6xtm: exit status 1 helpers_test.go:170: Cleaning up "custom-weave-20200724220248-14997" profile ... helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p custom-weave-20200724220248-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p custom-weave-20200724220248-14997: (5.071617908s) === CONT TestNetworkPlugins/group/kindnet/Start net_test.go:80: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p kindnet-20200724220311-14997 --memory=1800 --alsologtostderr --wait=true --wait-timeout=25m --cni=kindnet --vm-driver=docker --base-image=local/kicbase:-snapshot: exit status 70 (28m29.181281994s) -- stdout -- * [kindnet-20200724220311-14997] minikube v1.12.1 on Ubuntu 20.04 - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome * Using the docker driver based on user configuration * Starting control plane node kindnet-20200724220311-14997 in cluster kindnet-20200724220311-14997 * Pulling base image ... * Creating docker container (CPUs=2, Memory=1800MB) ... * Preparing Kubernetes v1.18.3 on Docker 19.03.2 ... * Configuring CNI (Container Networking Interface) ... * Verifying Kubernetes components... * Enabled addons: default-storageclass, storage-provisioner -- /stdout -- ** stderr ** I0724 22:03:12.019733 131317 out.go:188] Setting JSON to false I0724 22:03:12.022927 131317 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":1831,"bootTime":1595626361,"procs":1004,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 22:03:12.023613 131317 start.go:111] virtualization: kvm host I0724 22:03:12.042284 131317 notify.go:125] Checking for updates... I0724 22:03:12.051201 131317 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:03:12.111164 131317 docker.go:87] docker version: linux-19.03.8 I0724 22:03:12.118225 131317 start.go:217] selected driver: docker I0724 22:03:12.118234 131317 start.go:623] validating driver "docker" against I0724 22:03:12.118260 131317 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 22:03:12.118357 131317 cli_runner.go:109] Run: docker system info --format "{{json .}}" ! Requested memory allocation (1800MB) is less than the recommended minimum 2000MB. Kubernetes may crash unexpectedly. I0724 22:03:12.179108 131317 start_flags.go:223] no existing cluster config was found, will generate one from the flags ! Requested memory allocation (1800MB) is less than the recommended minimum 2000MB. Kubernetes may crash unexpectedly. 
I0724 22:03:12.179496 131317 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 22:03:12.179529 131317 cni.go:74] Creating CNI manager for "kindnet" I0724 22:03:12.179544 131317 start_flags.go:340] Found "CNI" CNI - setting NetworkPlugin=cni I0724 22:03:12.179563 131317 start_flags.go:345] config: {Name:kindnet-20200724220311-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:kindnet-20200724220311-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:kindnet NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:03:12.263079 131317 cache.go:117] Beginning downloading kic base image for docker with docker I0724 22:03:12.292830 131317 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:03:12.292875 131317 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:03:12.292906 131317 cache.go:51] Caching tarball of preloaded images I0724 22:03:12.292961 131317 cache.go:137] Downloading local/kicbase:-snapshot to local daemon I0724 22:03:12.292962 131317 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download I0724 22:03:12.292975 131317 image.go:140] Writing local/kicbase:-snapshot to local daemon I0724 22:03:12.292976 131317 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on docker I0724 22:03:12.293223 131317 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/config.json ... 
I0724 22:03:12.293310 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/config.json: {Name:mkce43ce1b45cd352951a3b4764c5b2e8de5d06e Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:03:12.689202 131317 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found I0724 22:03:12.689255 131317 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:03:12.689262 131317 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:03:17.475728 131317 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 ! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image I0724 22:03:17.475816 131317 cache.go:178] Successfully downloaded all kic artifacts I0724 22:03:17.475843 131317 start.go:241] acquiring machines lock for kindnet-20200724220311-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 22:04:44.179494 131317 start.go:245] acquired machines lock for "kindnet-20200724220311-14997" in 1m26.703622914s I0724 22:04:44.179539 131317 start.go:85] Provisioning new machine with config: &{Name:kindnet-20200724220311-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:kindnet-20200724220311-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:kindnet NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} &{Name: IP: Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true} I0724 22:04:44.179628 131317 start.go:122] createHost starting for "" (driver="docker") I0724 22:04:44.342605 131317 start.go:158] libmachine.API.Create for "kindnet-20200724220311-14997" (driver="docker") I0724 22:04:44.342665 131317 client.go:161] LocalClient.Create starting I0724 22:04:44.342714 131317 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem I0724 22:04:44.342760 131317 main.go:115] libmachine: Decoding PEM 
data... I0724 22:04:44.342795 131317 main.go:115] libmachine: Parsing certificate... I0724 22:04:44.342966 131317 main.go:115] libmachine: Reading certificate data from /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem I0724 22:04:44.343003 131317 main.go:115] libmachine: Decoding PEM data... I0724 22:04:44.343027 131317 main.go:115] libmachine: Parsing certificate... I0724 22:04:44.343573 131317 cli_runner.go:109] Run: docker ps -a --format {{.Names}} I0724 22:04:44.402256 131317 cli_runner.go:109] Run: docker volume create kindnet-20200724220311-14997 --label name.minikube.sigs.k8s.io=kindnet-20200724220311-14997 --label created_by.minikube.sigs.k8s.io=true I0724 22:04:44.469880 131317 oci.go:101] Successfully created a docker volume kindnet-20200724220311-14997 I0724 22:04:44.469946 131317 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/test -v kindnet-20200724220311-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib I0724 22:04:46.342253 131317 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/test -v kindnet-20200724220311-14997:/var kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -d /var/lib: (1.872270224s) I0724 22:04:46.342302 131317 oci.go:105] Successfully prepared a docker volume kindnet-20200724220311-14997 W0724 22:04:46.342344 131317 oci.go:165] Your kernel does not support swap limit capabilities or the cgroup is not mounted. I0724 22:04:46.343105 131317 cli_runner.go:109] Run: docker info --format "'{{json .SecurityOptions}}'" I0724 22:04:46.342369 131317 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:04:46.343370 131317 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:04:46.343390 131317 kic.go:133] Starting extracting preloaded images to volume ... 
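The "Starting extracting preloaded images to volume" step above mounts the lz4 preload tarball read-only into a throwaway container and untars it into the named docker volume, so the node container starts with its image store prepopulated. A sketch of the same pattern via os/exec; the flags mirror the docker run command recorded in the log, while the tarball path is shortened to an illustrative placeholder:

package main

import (
	"log"
	"os/exec"
)

func main() {
	// Disposable container: tar as entrypoint, tarball mounted read-only,
	// the docker volume mounted as the extraction target.
	tarball := "/path/to/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4" // illustrative path
	volume := "kindnet-20200724220311-14997"
	image := "kicbase/stable:v0.0.10"

	cmd := exec.Command("docker", "run", "--rm",
		"--entrypoint", "/usr/bin/tar",
		"-v", tarball+":/preloaded.tar:ro",
		"-v", volume+":/extractDir",
		image,
		"-I", "lz4", "-xvf", "/preloaded.tar", "-C", "/extractDir")
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("extract failed: %v\n%s", err, out)
	}
}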
I0724 22:04:46.343495 131317 cli_runner.go:109] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v kindnet-20200724220311-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir I0724 22:04:46.435140 131317 cli_runner.go:109] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname kindnet-20200724220311-14997 --name kindnet-20200724220311-14997 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=kindnet-20200724220311-14997 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=kindnet-20200724220311-14997 --volume kindnet-20200724220311-14997:/var --security-opt apparmor=unconfined --cpus=2 --memory=1800mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 I0724 22:04:47.337053 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Running}} I0724 22:04:47.408505 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Status}} I0724 22:04:47.470605 131317 cli_runner.go:109] Run: docker exec kindnet-20200724220311-14997 stat /var/lib/dpkg/alternatives/iptables I0724 22:04:49.010863 131317 cli_runner.go:151] Completed: docker exec kindnet-20200724220311-14997 stat /var/lib/dpkg/alternatives/iptables: (1.540203427s) I0724 22:04:49.010890 131317 oci.go:222] the created container "kindnet-20200724220311-14997" has a running status. I0724 22:04:49.010904 131317 kic.go:157] Creating ssh key for kic: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa... 
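"Creating ssh key for kic" above generates an RSA keypair whose public half is then pushed into /home/docker/.ssh/authorized_keys inside the node container. A self-contained sketch of that keypair generation with crypto/rsa and golang.org/x/crypto/ssh; file names follow the log, but this is an approximation, not the minikube source:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Generate the private key and write it PEM-encoded as id_rsa.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	if err := os.WriteFile("id_rsa", privPEM, 0600); err != nil {
		log.Fatal(err)
	}

	// Derive the OpenSSH-format public key; this is what lands in the
	// container's authorized_keys in the next log entries.
	pub, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("id_rsa.pub", ssh.MarshalAuthorizedKey(pub), 0644); err != nil {
		log.Fatal(err)
	}
}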
I0724 22:04:49.254153 131317 kic_runner.go:179] docker (temp): /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) I0724 22:04:57.788506 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Status}} I0724 22:04:57.857906 131317 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys I0724 22:04:57.857929 131317 kic_runner.go:114] Args: [docker exec --privileged kindnet-20200724220311-14997 chown docker:docker /home/docker/.ssh/authorized_keys] I0724 22:04:58.178883 131317 cli_runner.go:151] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v kindnet-20200724220311-14997:/extractDir kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 -I lz4 -xvf /preloaded.tar -C /extractDir: (11.835331408s) I0724 22:04:58.178921 131317 kic.go:138] duration metric: took 11.835530 seconds to extract preloaded images to volume I0724 22:04:58.179030 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Status}} I0724 22:04:58.244868 131317 machine.go:88] provisioning docker machine ... I0724 22:04:58.244904 131317 ubuntu.go:166] provisioning hostname "kindnet-20200724220311-14997" I0724 22:04:58.244992 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:04:58.329487 131317 main.go:115] libmachine: Using SSH client type: native I0724 22:04:58.329736 131317 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32872 } I0724 22:04:58.329758 131317 main.go:115] libmachine: About to run SSH command: sudo hostname kindnet-20200724220311-14997 && echo "kindnet-20200724220311-14997" | sudo tee /etc/hostname I0724 22:04:58.330755 131317 main.go:115] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:54890->127.0.0.1:32872: read: connection reset by peer I0724 22:05:01.465939 131317 main.go:115] libmachine: SSH cmd err, output: : kindnet-20200724220311-14997 I0724 22:05:01.466033 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:01.529151 131317 main.go:115] libmachine: Using SSH client type: native I0724 22:05:01.529361 131317 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32872 } I0724 22:05:01.529391 131317 main.go:115] libmachine: About to run SSH command: if ! 
grep -xq '.*\skindnet-20200724220311-14997' /etc/hosts; then if grep -xq '127.0.1.1\s.*' /etc/hosts; then sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 kindnet-20200724220311-14997/g' /etc/hosts; else echo '127.0.1.1 kindnet-20200724220311-14997' | sudo tee -a /etc/hosts; fi fi I0724 22:05:01.657555 131317 main.go:115] libmachine: SSH cmd err, output: : I0724 22:05:01.657587 131317 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube} I0724 22:05:01.657617 131317 ubuntu.go:174] setting up certificates I0724 22:05:01.657628 131317 provision.go:82] configureAuth start I0724 22:05:01.657699 131317 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" kindnet-20200724220311-14997 I0724 22:05:01.715102 131317 provision.go:131] copyHostCerts I0724 22:05:01.715162 131317 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ... I0724 22:05:01.715217 131317 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes) I0724 22:05:01.715299 131317 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ... I0724 22:05:01.715331 131317 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes) I0724 22:05:01.715382 131317 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ... 
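The SSH command above keeps /etc/hosts idempotent: do nothing if the hostname is already present, rewrite an existing 127.0.1.1 line if there is one, otherwise append a fresh entry. The same decision tree as a local Go helper, a sketch only, since the real flow runs that shell one-liner remotely:

package main

import (
	"fmt"
	"os"
	"regexp"
)

// ensureHostname mirrors the shell logic above: leave the file alone if
// the name is present, rewrite an existing 127.0.1.1 entry, or append.
func ensureHostname(path, name string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	if regexp.MustCompile(`(?m)^.*\s` + regexp.QuoteMeta(name) + `$`).Match(data) {
		return nil // already configured
	}
	loopback := regexp.MustCompile(`(?m)^127\.0\.1\.1\s.*$`)
	entry := "127.0.1.1 " + name
	if loopback.Match(data) {
		data = loopback.ReplaceAll(data, []byte(entry))
	} else {
		data = append(data, []byte("\n"+entry+"\n")...)
	}
	return os.WriteFile(path, data, 0644)
}

func main() {
	if err := ensureHostname("/etc/hosts", "kindnet-20200724220311-14997"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}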
I0724 22:05:01.715416 131317 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes) I0724 22:05:01.715475 131317 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.kindnet-20200724220311-14997 san=[172.17.0.11 localhost 127.0.0.1] I0724 22:05:01.927748 131317 provision.go:159] copyRemoteCerts I0724 22:05:01.927818 131317 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker I0724 22:05:01.927861 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:01.985327 131317 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32872 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa Username:docker} I0724 22:05:02.077121 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1147 bytes) I0724 22:05:02.101466 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes) I0724 22:05:02.123914 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes) I0724 22:05:02.144480 131317 provision.go:85] duration metric: configureAuth took 486.835252ms I0724 22:05:02.144508 131317 ubuntu.go:190] setting minikube options for container-runtime I0724 22:05:02.144703 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:02.198456 131317 main.go:115] libmachine: Using SSH client type: native I0724 22:05:02.198627 131317 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32872 } I0724 22:05:02.198645 131317 main.go:115] libmachine: About to run SSH command: df --output=fstype / | tail -n 1 I0724 22:05:02.325487 131317 main.go:115] libmachine: SSH cmd err, output: : overlay I0724 22:05:02.325512 131317 ubuntu.go:71] root file system type: overlay I0724 22:05:02.325652 131317 provision.go:290] Updating docker unit: /lib/systemd/system/docker.service ... 
I0724 22:05:02.325719 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:02.387036 131317 main.go:115] libmachine: Using SSH client type: native I0724 22:05:02.387241 131317 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32872 } I0724 22:05:02.387333 131317 main.go:115] libmachine: About to run SSH command: sudo mkdir -p /lib/systemd/system && printf %s "[Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com BindsTo=containerd.service After=network-online.target firewalld.service containerd.service Wants=network-online.target Requires=docker.socket [Service] Type=notify # This file is a systemd drop-in unit that inherits from the base dockerd configuration. # The base configuration already specifies an 'ExecStart=...' command. The first directive # here is to clear out that command inherited from the base configuration. Without this, # the command from the base configuration and the command specified here are treated as # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd # will catch this invalid input and refuse to start the service with an error like: # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other # container runtimes. If left unlimited, it may result in OOM issues with MySQL. ExecStart= ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 ExecReload=/bin/kill -s HUP $MAINPID # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity # Uncomment TasksMax if your systemd version supports it. # Only systemd 226 and above support this version. TasksMax=infinity TimeoutStartSec=0 # set delegate yes so that systemd does not reset the cgroups of docker containers Delegate=yes # kill only the docker process, not all processes in the cgroup KillMode=process [Install] WantedBy=multi-user.target " | sudo tee /lib/systemd/system/docker.service.new I0724 22:05:02.525279 131317 main.go:115] libmachine: SSH cmd err, output: : [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com BindsTo=containerd.service After=network-online.target firewalld.service containerd.service Wants=network-online.target Requires=docker.socket [Service] Type=notify # This file is a systemd drop-in unit that inherits from the base dockerd configuration. # The base configuration already specifies an 'ExecStart=...' command. The first directive # here is to clear out that command inherited from the base configuration. Without this, # the command from the base configuration and the command specified here are treated as # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd # will catch this invalid input and refuse to start the service with an error like: # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. 
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other # container runtimes. If left unlimited, it may result in OOM issues with MySQL. ExecStart= ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 ExecReload=/bin/kill -s HUP # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity # Uncomment TasksMax if your systemd version supports it. # Only systemd 226 and above support this version. TasksMax=infinity TimeoutStartSec=0 # set delegate yes so that systemd does not reset the cgroups of docker containers Delegate=yes # kill only the docker process, not all processes in the cgroup KillMode=process [Install] WantedBy=multi-user.target I0724 22:05:02.525355 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:02.583172 131317 main.go:115] libmachine: Using SSH client type: native I0724 22:05:02.583335 131317 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32872 } I0724 22:05:02.583360 131317 main.go:115] libmachine: About to run SSH command: sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; } I0724 22:05:04.851198 131317 main.go:115] libmachine: SSH cmd err, output: : --- /lib/systemd/system/docker.service 2019-08-29 04:42:14.000000000 +0000 +++ /lib/systemd/system/docker.service.new 2020-07-24 22:05:02.518104836 +0000 @@ -8,24 +8,22 @@ [Service] Type=notify -# the default is not to use systemd for cgroups because the delegate issues still -# exists and systemd currently does not support the cgroup feature set required -# for containers run by docker -ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock -ExecReload=/bin/kill -s HUP $MAINPID -TimeoutSec=0 -RestartSec=2 -Restart=always - -# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. -# Both the old, and new location are accepted by systemd 229 and up, so using the old location -# to make them work for either version of systemd. -StartLimitBurst=3 - -# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. -# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make -# this option work for either version of systemd. -StartLimitInterval=60s + + + +# This file is a systemd drop-in unit that inherits from the base dockerd configuration. +# The base configuration already specifies an 'ExecStart=...' command. The first directive +# here is to clear out that command inherited from the base configuration. 
Without this, +# the command from the base configuration and the command specified here are treated as +# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd +# will catch this invalid input and refuse to start the service with an error like: +# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + +# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other +# container runtimes. If left unlimited, it may result in OOM issues with MySQL. +ExecStart= +ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 +ExecReload=/bin/kill -s HUP # Having non-zero Limit*s causes performance problems due to accounting overhead # in the kernel. We recommend using cgroups to do container-local accounting. @@ -33,9 +31,10 @@ LimitNPROC=infinity LimitCORE=infinity -# Comment TasksMax if your systemd version does not support it. -# Only systemd 226 and above support this option. +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. TasksMax=infinity +TimeoutStartSec=0 # set delegate yes so that systemd does not reset the cgroups of docker containers Delegate=yes I0724 22:05:04.851244 131317 machine.go:91] provisioned docker machine in 6.606352761s I0724 22:05:04.851261 131317 client.go:164] LocalClient.Create took 20.508582617s I0724 22:05:04.851286 131317 start.go:163] duration metric: libmachine.API.Create for "kindnet-20200724220311-14997" took 20.508678523s I0724 22:05:04.851299 131317 start.go:204] post-start starting for "kindnet-20200724220311-14997" (driver="docker") I0724 22:05:04.851309 131317 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] I0724 22:05:04.851398 131317 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs I0724 22:05:04.851667 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:04.913159 131317 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32872 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa Username:docker} I0724 22:05:05.007796 131317 ssh_runner.go:148] Run: cat /etc/os-release I0724 22:05:05.011472 131317 main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found I0724 22:05:05.011508 131317 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found I0724 22:05:05.011527 131317 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found I0724 22:05:05.011542 131317 info.go:98] Remote host: Ubuntu 19.10 I0724 22:05:05.011554 131317 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets ... 
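The unified diff above is emitted by the "sudo diff -u ... || { sudo mv ...; systemctl ...; }" command a few entries earlier: the freshly rendered docker.service.new replaces the live unit, and docker is restarted, only when the contents actually differ. A sketch of that change-detection idiom, using the same paths and systemctl invocations as the log:

package main

import (
	"bytes"
	"log"
	"os"
	"os/exec"
)

func main() {
	const unit = "/lib/systemd/system/docker.service"
	current, _ := os.ReadFile(unit) // a missing unit reads as empty
	proposed, err := os.ReadFile(unit + ".new")
	if err != nil {
		log.Fatal(err)
	}
	if bytes.Equal(current, proposed) {
		return // unchanged: skip the disruptive daemon restart
	}
	if err := os.Rename(unit+".new", unit); err != nil {
		log.Fatal(err)
	}
	for _, args := range [][]string{
		{"daemon-reload"},
		{"-f", "enable", "docker"},
		{"-f", "restart", "docker"},
	} {
		if out, err := exec.Command("systemctl", args...).CombinedOutput(); err != nil {
			log.Fatalf("systemctl %v: %v\n%s", args, err, out)
		}
	}
}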
I0724 22:05:05.011623 131317 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ... I0724 22:05:05.011803 131317 filesync.go:141] local asset: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts -> hosts in /etc/test/nested/copy/14997 I0724 22:05:05.011874 131317 ssh_runner.go:148] Run: sudo mkdir -p /etc/test/nested/copy/14997 I0724 22:05:05.020589 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files/etc/test/nested/copy/14997/hosts --> /etc/test/nested/copy/14997/hosts (40 bytes) I0724 22:05:05.046706 131317 start.go:207] post-start completed in 195.390087ms I0724 22:05:05.047088 131317 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" kindnet-20200724220311-14997 I0724 22:05:05.106352 131317 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/config.json ... I0724 22:05:05.106581 131317 start.go:125] duration metric: createHost completed in 20.926941398s I0724 22:05:05.106602 131317 start.go:76] releasing machines lock for "kindnet-20200724220311-14997", held for 20.927082208s I0724 22:05:05.106692 131317 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" kindnet-20200724220311-14997 I0724 22:05:05.175450 131317 ssh_runner.go:148] Run: systemctl --version I0724 22:05:05.175510 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:05.175515 131317 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/ I0724 22:05:05.175606 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:05:05.251896 131317 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32872 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa Username:docker} I0724 22:05:05.259461 131317 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32872 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa Username:docker} I0724 22:05:05.452201 131317 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd I0724 22:05:05.467788 131317 ssh_runner.go:148] Run: sudo systemctl cat docker.service I0724 22:05:05.488660 131317 cruntime.go:192] skipping containerd shutdown because we are bound to it I0724 22:05:05.488744 131317 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service crio I0724 22:05:05.505258 131317 ssh_runner.go:148] Run: sudo systemctl cat docker.service I0724 22:05:05.519216 131317 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:05:05.621875 131317 ssh_runner.go:148] Run: sudo systemctl start docker I0724 22:05:05.638232 131317 ssh_runner.go:148] Run: docker version --format {{.Server.Version}} I0724 22:05:05.733444 131317 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}} I0724 22:05:05.816439 131317 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 0).Gateway}}" d4a420189740 I0724 22:05:05.887575 
131317 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1 I0724 22:05:05.887645 131317 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts I0724 22:05:05.891751 131317 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts" I0724 22:05:05.904686 131317 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime docker I0724 22:05:05.904718 131317 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-docker-overlay2-amd64.tar.lz4 I0724 22:05:05.904806 131317 ssh_runner.go:148] Run: docker images --format {{.Repository}}:{{.Tag}} I0724 22:05:05.980291 131317 docker.go:381] Got preloaded images: -- stdout -- kubernetesui/dashboard:v2.0.1 k8s.gcr.io/kube-proxy:v1.18.3 k8s.gcr.io/kube-controller-manager:v1.18.3 k8s.gcr.io/kube-apiserver:v1.18.3 k8s.gcr.io/kube-scheduler:v1.18.3 kubernetesui/metrics-scraper:v1.0.4 k8s.gcr.io/pause:3.2 k8s.gcr.io/coredns:1.6.7 k8s.gcr.io/etcd:3.4.3-0 gcr.io/k8s-minikube/storage-provisioner:v1.8.1 -- /stdout -- I0724 22:05:05.980383 131317 docker.go:319] Images already preloaded, skipping extraction I0724 22:05:05.980442 131317 ssh_runner.go:148] Run: docker images --format {{.Repository}}:{{.Tag}} I0724 22:05:06.044504 131317 docker.go:381] Got preloaded images: -- stdout -- kubernetesui/dashboard:v2.0.1 k8s.gcr.io/kube-proxy:v1.18.3 k8s.gcr.io/kube-scheduler:v1.18.3 k8s.gcr.io/kube-apiserver:v1.18.3 k8s.gcr.io/kube-controller-manager:v1.18.3 kubernetesui/metrics-scraper:v1.0.4 k8s.gcr.io/pause:3.2 k8s.gcr.io/coredns:1.6.7 k8s.gcr.io/etcd:3.4.3-0 gcr.io/k8s-minikube/storage-provisioner:v1.8.1 -- /stdout -- I0724 22:05:06.044539 131317 cache_images.go:69] Images are preloaded, skipping loading I0724 22:05:06.044599 131317 ssh_runner.go:148] Run: docker info --format {{.CgroupDriver}} I0724 22:05:06.115631 131317 cni.go:74] Creating CNI manager for "kindnet" I0724 22:05:06.115658 131317 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16 I0724 22:05:06.115676 131317 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.11 APIServerPort:8443 KubernetesVersion:v1.18.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:kindnet-20200724220311-14997 NodeName:kindnet-20200724220311-14997 DNSDomain:cluster.local CRISocket: ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.11"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.11 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]} I0724 22:05:06.115826 131317 kubeadm.go:154] kubeadm config: apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration localAPIEndpoint: advertiseAddress: 172.17.0.11 bindPort: 8443 bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token ttl: 24h0m0s usages: - signing - authentication nodeRegistration: criSocket: /var/run/dockershim.sock name: 
"kindnet-20200724220311-14997" kubeletExtraArgs: node-ip: 172.17.0.11 taints: [] --- apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration apiServer: certSANs: ["127.0.0.1", "localhost", "172.17.0.11"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 dns: type: CoreDNS etcd: local: dataDir: /var/lib/minikube/etcd controllerManager: extraArgs: "leader-elect": "false" scheduler: extraArgs: "leader-elect": "false" kubernetesVersion: v1.18.3 networking: dnsDomain: cluster.local podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: cgroupfs clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%" nodefs.inodesFree: "0%" imagefs.available: "0%" failSwapOn: false staticPodPath: /etc/kubernetes/manifests --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration clusterCIDR: "10.244.0.0/16" metricsBindAddress: 172.17.0.11:10249 I0724 22:05:06.115923 131317 kubeadm.go:790] kubelet [Unit] Wants=docker.socket [Service] ExecStart= ExecStart=/var/lib/minikube/binaries/v1.18.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=kindnet-20200724220311-14997 --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.11 [Install] config: {KubernetesVersion:v1.18.3 ClusterName:kindnet-20200724220311-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:kindnet NodeIP: NodePort:8443 NodeName:} I0724 22:05:06.116017 131317 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.18.3 I0724 22:05:06.144621 131317 binaries.go:43] Found k8s binaries, skipping transfer I0724 22:05:06.144729 131317 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube I0724 22:05:06.195138 131317 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (374 bytes) I0724 22:05:06.225326 131317 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes) I0724 22:05:06.259134 131317 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1780 bytes) I0724 22:05:06.289873 131317 ssh_runner.go:148] Run: grep 172.17.0.11 control-plane.minikube.internal$ /etc/hosts I0724 22:05:06.295750 131317 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.11 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts" I0724 22:05:06.315719 131317 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:05:06.431894 131317 ssh_runner.go:148] Run: sudo systemctl start kubelet I0724 22:05:06.461757 131317 certs.go:52] Setting up 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997 for IP: 172.17.0.11 I0724 22:05:06.461818 131317 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key I0724 22:05:06.461841 131317 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key I0724 22:05:06.461914 131317 certs.go:273] generating minikube-user signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/client.key I0724 22:05:06.461921 131317 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/client.crt with IP's: [] I0724 22:05:06.658608 131317 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/client.crt ... I0724 22:05:06.658653 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/client.crt: {Name:mk8877042ffb96ed4d13e332ebe732a85721da85 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:06.790713 131317 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/client.key ... I0724 22:05:06.790755 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/client.key: {Name:mkf88b328710696c8070243807dd988c8bf5ddf2 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:06.790927 131317 certs.go:273] generating minikube signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.key.3107a04c I0724 22:05:06.790947 131317 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.crt.3107a04c with IP's: [172.17.0.11 10.96.0.1 127.0.0.1 10.0.0.1] I0724 22:05:07.145802 131317 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.crt.3107a04c ... I0724 22:05:07.145839 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.crt.3107a04c: {Name:mk51ff1e994a77d43d162bc06b5bfb51350b67f2 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:07.146103 131317 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.key.3107a04c ... 
I0724 22:05:07.146158 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.key.3107a04c: {Name:mk227ad818da6e4f951d0a937d65ad0fed4b5778 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:07.146301 131317 certs.go:284] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.crt.3107a04c -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.crt I0724 22:05:07.146407 131317 certs.go:288] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.key.3107a04c -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.key I0724 22:05:07.146538 131317 certs.go:273] generating aggregator signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.key I0724 22:05:07.146553 131317 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.crt with IP's: [] I0724 22:05:07.288561 131317 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.crt ... I0724 22:05:07.288596 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.crt: {Name:mk98634f2807ffcefe5951a5d36150bfc8ca2875 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:07.288806 131317 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.key ... 
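The apiserver certificate generated above carries the IP SANs [172.17.0.11 10.96.0.1 127.0.0.1 10.0.0.1], covering the node IP, the kubernetes service ClusterIP on the 10.96.0.0/12 service network, and loopback. A compact sketch of minting a certificate with those SANs using crypto/x509; it is self-signed here for brevity, whereas the cert in the log is signed by the minikubeCA key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// The four IP SANs recorded in the log above.
		IPAddresses: []net.IP{
			net.ParseIP("172.17.0.11"),
			net.ParseIP("10.96.0.1"),
			net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"),
		},
	}
	// Self-signed (template == parent) for brevity only.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}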
I0724 22:05:07.288823 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.key: {Name:mk67b5f17a473b9ed787533fee580db66be7207c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:05:07.289082 131317 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes) W0724 22:05:07.289133 131317 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes I0724 22:05:07.289155 131317 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes) I0724 22:05:07.289207 131317 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes) I0724 22:05:07.289241 131317 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes) I0724 22:05:07.289279 131317 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes) I0724 22:05:07.290341 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes) I0724 22:05:07.319344 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes) I0724 22:05:07.754111 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes) I0724 22:05:07.783388 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/kindnet-20200724220311-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes) I0724 22:05:07.812299 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes) I0724 22:05:07.841298 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes) I0724 22:05:07.873527 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes) I0724 22:05:07.899823 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes) I0724 22:05:07.925897 131317 ssh_runner.go:215] scp 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes) I0724 22:05:07.953006 131317 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes) I0724 22:05:07.981755 131317 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes) I0724 22:05:08.010773 131317 ssh_runner.go:148] Run: openssl version I0724 22:05:08.016868 131317 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem" I0724 22:05:08.030476 131317 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem I0724 22:05:08.035320 131317 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem I0724 22:05:08.035390 131317 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem I0724 22:05:08.042685 131317 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0" I0724 22:05:08.053385 131317 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0724 22:05:08.068024 131317 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0724 22:05:08.074766 131317 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem I0724 22:05:08.074833 131317 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0724 22:05:08.081938 131317 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0724 22:05:08.100554 131317 kubeadm.go:327] StartCluster: {Name:kindnet-20200724220311-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:1800 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:kindnet-20200724220311-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:kindnet NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.11 Port:8443 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:05:08.100691 131317 ssh_runner.go:148] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}} I0724 22:05:08.171481 131317 ssh_runner.go:148] Run: sudo ls 
/var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd I0724 22:05:08.182350 131317 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml I0724 22:05:08.191907 131317 kubeadm.go:211] ignoring SystemVerification for kubeadm because of docker driver I0724 22:05:08.191986 131317 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf I0724 22:05:08.203347 131317 kubeadm.go:147] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2 stdout: stderr: ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory I0724 22:05:08.203398 131317 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables" I0724 22:05:45.905203 131317 ssh_runner.go:188] Completed: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": (37.701780691s) I0724 22:05:45.905229 131317 cni.go:74] Creating CNI manager for "kindnet" I0724 22:05:45.922314 131317 ssh_runner.go:148] Run: stat /opt/cni/bin/portmap I0724 22:05:45.929020 131317 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.18.3/kubectl ... 
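The "config check failed, skipping stale config cleanup" entry above is a cheap freshness probe: ls exits with status 2 because none of the kubeconfigs a previous kubeadm run would have written exist, so cleanup is skipped and kubeadm init proceeds directly. The same probe done with os.Stat instead of a remote ls, with the file list copied from the log:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Kubeconfigs a previous kubeadm run would have left behind.
	files := []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	}
	stale := false
	for _, f := range files {
		if _, err := os.Stat(f); err == nil {
			stale = true
			break
		}
	}
	if stale {
		fmt.Println("previous cluster detected: clean up before kubeadm init")
	} else {
		fmt.Println("fresh node: run kubeadm init directly")
	}
}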
I0724 22:05:45.929039 131317 ssh_runner.go:215] scp memory --> /var/tmp/minikube/cni.yaml (2285 bytes) I0724 22:05:45.977813 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml I0724 22:05:48.646273 131317 ssh_runner.go:188] Completed: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (2.66841688s) I0724 22:05:48.646330 131317 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj" I0724 22:05:48.646456 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:48.646470 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl label nodes minikube.k8s.io/version=v1.12.1 minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf minikube.k8s.io/name=kindnet-20200724220311-14997 minikube.k8s.io/updated_at=2020_07_24T22_05_48_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:48.794825 131317 ops.go:35] apiserver oom_adj: -16 I0724 22:05:48.839958 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:49.441319 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:49.941350 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:50.441358 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:50.941374 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:51.441266 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:51.941321 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:52.441317 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:52.941328 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:53.441312 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:53.941345 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:54.441734 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:54.941374 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:55.441360 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:55.941321 131317 ssh_runner.go:148] Run: sudo 
/var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:56.441354 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:56.941311 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:57.441291 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:57.941261 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:58.441305 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:58.941311 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:59.441297 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:05:59.941377 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:00.441309 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:00.941281 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:01.441471 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:01.941914 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:02.441315 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:02.941305 131317 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig I0724 22:06:03.077928 131317 kubeadm.go:866] duration metric: took 14.431549777s to wait for elevateKubeSystemPrivileges. I0724 22:06:03.077955 131317 kubeadm.go:329] StartCluster complete in 54.977408749s I0724 22:06:03.077971 131317 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:06:03.078051 131317 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig I0724 22:06:03.082051 131317 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:06:03.082302 131317 start.go:195] Will wait wait-timeout for node ... 
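[editor's note] The long run of identical `kubectl get sa default` entries above is a fixed-interval poll: note the `.441`/`.941` timestamp cadence, i.e. one attempt every 500ms until the token controller creates the default service account, which is what the "took 14.431549777s to wait for elevateKubeSystemPrivileges" metric sums up. A self-contained sketch of that loop (the 2-minute timeout is an illustrative choice, not minikube's constant):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForDefaultSA retries `kubectl get sa default` every 500ms until it
// succeeds or the deadline passes. A non-zero exit just means the service
// account has not been created yet, so it is not treated as fatal.
func waitForDefaultSA(kubectl, kubeconfig string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		cmd := exec.Command("sudo", kubectl, "get", "sa", "default",
			"--kubeconfig="+kubeconfig)
		if cmd.Run() == nil {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("default service account never appeared")
}

func main() {
	start := time.Now()
	if err := waitForDefaultSA("/var/lib/minikube/binaries/v1.18.3/kubectl",
		"/var/lib/minikube/kubeconfig", 2*time.Minute); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("took %s to wait for elevateKubeSystemPrivileges.\n", time.Since(start))
}
```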
I0724 22:06:03.082352 131317 addons.go:353] enableAddons start: toEnable=map[], additional=[] I0724 22:06:03.082421 131317 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl scale deployment --replicas=1 coredns -n=kube-system I0724 22:06:03.098615 131317 addons.go:53] Setting storage-provisioner=true in profile "kindnet-20200724220311-14997" I0724 22:06:03.098650 131317 addons.go:129] Setting addon storage-provisioner=true in "kindnet-20200724220311-14997" W0724 22:06:03.098663 131317 addons.go:138] addon storage-provisioner should already be in state true I0724 22:06:03.098679 131317 addons.go:53] Setting default-storageclass=true in profile "kindnet-20200724220311-14997" I0724 22:06:03.098684 131317 host.go:65] Checking if "kindnet-20200724220311-14997" exists ... I0724 22:06:03.098708 131317 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "kindnet-20200724220311-14997" I0724 22:06:03.099196 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Status}} I0724 22:06:03.099480 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Status}} I0724 22:06:03.102442 131317 api_server.go:48] waiting for apiserver process to appear ... I0724 22:06:03.102511 131317 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.* I0724 22:06:03.167429 131317 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:06:03.167459 131317 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes) I0724 22:06:03.167527 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:06:03.181916 131317 addons.go:129] Setting addon default-storageclass=true in "kindnet-20200724220311-14997" W0724 22:06:03.181939 131317 addons.go:138] addon default-storageclass should already be in state true I0724 22:06:03.181954 131317 host.go:65] Checking if "kindnet-20200724220311-14997" exists ... I0724 22:06:03.182530 131317 cli_runner.go:109] Run: docker container inspect kindnet-20200724220311-14997 --format={{.State.Status}} I0724 22:06:03.238095 131317 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32872 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa Username:docker} I0724 22:06:03.245468 131317 api_server.go:68] duration metric: took 163.137922ms to wait for apiserver process to appear ... I0724 22:06:03.245500 131317 api_server.go:84] waiting for apiserver healthz status ... I0724 22:06:03.245511 131317 api_server.go:221] Checking apiserver healthz at https://172.17.0.11:8443/healthz ... 
I0724 22:06:03.245629 131317 start.go:549] successfully scaled coredns replicas to 1 I0724 22:06:03.246271 131317 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml I0724 22:06:03.246293 131317 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) I0724 22:06:03.246361 131317 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" kindnet-20200724220311-14997 I0724 22:06:03.256036 131317 api_server.go:241] https://172.17.0.11:8443/healthz returned 200: ok I0724 22:06:03.257140 131317 api_server.go:137] control plane version: v1.18.3 I0724 22:06:03.257167 131317 api_server.go:127] duration metric: took 11.658909ms to wait for apiserver health ... I0724 22:06:03.257186 131317 system_pods.go:43] waiting for kube-system pods to appear ... I0724 22:06:03.284922 131317 system_pods.go:59] 8 kube-system pods found I0724 22:06:03.284965 131317 system_pods.go:61] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) I0724 22:06:03.284978 131317 system_pods.go:61] "coredns-66bff467f8-t5bzk" [d30c77b2-32c8-49f6-8815-424e5aa3a6a3] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) I0724 22:06:03.284987 131317 system_pods.go:61] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:03.285001 131317 system_pods.go:61] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:03.285011 131317 system_pods.go:61] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:03.285018 131317 system_pods.go:61] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:03.285025 131317 system_pods.go:61] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:03.285039 131317 system_pods.go:61] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:03.285045 131317 system_pods.go:74] duration metric: took 27.851733ms to wait for pod list to return data ... I0724 22:06:03.285053 131317 default_sa.go:33] waiting for default service account to be created ... I0724 22:06:03.298039 131317 default_sa.go:44] found service account: "default" I0724 22:06:03.298068 131317 default_sa.go:54] duration metric: took 13.001102ms for default service account to be created ... I0724 22:06:03.298080 131317 system_pods.go:116] waiting for k8s-apps to be running ... 
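[editor's note] The "missing components" bookkeeping that drives the retries below can be reconstructed as: list the kube-system pods and flag every expected component with no pod in status Running (which is why kube-proxy drops off the missing list at 22:06:06 while coredns, still Pending, keeps kube-dns on it). A hedged sketch that scrapes `kubectl get pods --no-headers`; the prefix matching and the expected-component list are simplifications of the real check:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

var expected = []string{"kube-dns", "etcd", "kube-apiserver",
	"kube-controller-manager", "kube-proxy", "kube-scheduler"}

// missingComponents parses `kubectl get pods` output (NAME READY STATUS
// RESTARTS AGE) and returns every expected component with no Running pod.
func missingComponents(kubectl, kubeconfig string) ([]string, error) {
	out, err := exec.Command("sudo", kubectl, "get", "pods",
		"--namespace=kube-system", "--no-headers",
		"--kubeconfig="+kubeconfig).Output()
	if err != nil {
		return nil, err
	}
	running := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		fields := strings.Fields(line)
		if len(fields) < 3 || fields[2] != "Running" {
			continue
		}
		name := fields[0]
		if strings.HasPrefix(name, "coredns") {
			name = "kube-dns" // coredns pods back the kube-dns component
		}
		for _, want := range expected {
			if strings.HasPrefix(name, want) {
				running[want] = true
			}
		}
	}
	var missing []string
	for _, want := range expected {
		if !running[want] {
			missing = append(missing, want)
		}
	}
	return missing, nil
}

func main() {
	m, err := missingComponents("/var/lib/minikube/binaries/v1.18.3/kubectl",
		"/var/lib/minikube/kubeconfig")
	if err != nil {
		fmt.Println("listing pods failed:", err)
		return
	}
	if len(m) > 0 {
		fmt.Println("missing components:", strings.Join(m, ", "))
	}
}
```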
I0724 22:06:03.321032 131317 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32872 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/kindnet-20200724220311-14997/id_rsa Username:docker} I0724 22:06:03.352504 131317 system_pods.go:86] 7 kube-system pods found I0724 22:06:03.352542 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) I0724 22:06:03.352553 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:03.352567 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:03.352582 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:03.352609 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:03.352621 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:03.352646 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:03.352662 131317 retry.go:30] will retry after 263.082536ms: missing components: kube-dns, kube-proxy I0724 22:06:03.363512 131317 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml I0724 22:06:03.444559 131317 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml I0724 22:06:03.621067 131317 system_pods.go:86] 7 kube-system pods found I0724 22:06:03.621096 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) 
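[editor's note] The lengthening "will retry after ..." gaps from retry.go below (263ms, 381ms, 422ms, ... eventually around a minute, with visible jitter) are consistent with randomized exponential backoff capped near the wait-timeout scale. A generic sketch of such a schedule; the multiplier, jitter factor, cap, and deadline are assumptions, not minikube's exact constants:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithBackoff reruns op until it succeeds or the deadline passes,
// sleeping a randomized, exponentially growing interval between attempts.
func retryWithBackoff(op func() error) error {
	interval := 250 * time.Millisecond
	const (
		multiplier = 1.5  // growth per attempt (assumed)
		jitter     = 0.5  // +/-50% randomization (assumed)
	)
	maxWait := 70 * time.Second            // cap, matching the ~1m plateau seen in the log
	deadline := time.Now().Add(15 * time.Minute) // stand-in for wait-timeout
	for time.Now().Before(deadline) {
		if err := op(); err == nil {
			return nil
		}
		// Randomize around the current interval so parallel waiters do not
		// hit the apiserver in lockstep.
		wait := time.Duration(float64(interval) * (1 - jitter + 2*jitter*rand.Float64()))
		if wait > maxWait {
			wait = maxWait
		}
		fmt.Printf("will retry after %s\n", wait)
		time.Sleep(wait)
		interval = time.Duration(float64(interval) * multiplier)
	}
	return fmt.Errorf("timed out waiting for condition")
}

func main() {
	attempts := 0
	_ = retryWithBackoff(func() error {
		attempts++
		if attempts < 5 {
			return fmt.Errorf("missing components: kube-dns")
		}
		return nil
	})
}
```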
I0724 22:06:03.621103 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:03.621112 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:03.621118 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:03.621125 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:03.621132 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:03.621138 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:03.621152 131317 retry.go:30] will retry after 381.329545ms: missing components: kube-dns, kube-proxy I0724 22:06:03.947181 131317 addons.go:355] enableAddons completed in 864.831922ms I0724 22:06:04.009370 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:04.009415 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.) I0724 22:06:04.009426 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:04.009443 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:04.009452 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:04.009472 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:04.009484 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:04.009499 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:04.009512 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:04.009537 131317 retry.go:30] will retry after 422.765636ms: missing components: kube-dns, kube-proxy I0724 22:06:04.443097 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:04.443144 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:04.443155 131317 system_pods.go:89] 
"etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:04.443168 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:04.443177 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:04.443188 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:04.443201 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:04.443253 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:04.443275 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:04.443290 131317 retry.go:30] will retry after 473.074753ms: missing components: kube-dns, kube-proxy I0724 22:06:04.931710 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:04.931743 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:04.931750 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:04.931759 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:04.931765 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:04.931771 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:04.931778 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:04.931784 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:04.931791 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:04.931801 131317 retry.go:30] will retry after 587.352751ms: missing components: kube-dns, kube-proxy I0724 22:06:05.539965 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:05.540001 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady 
(containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:05.540011 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:05.540024 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:05.540035 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:05.540046 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:05.540064 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy]) I0724 22:06:05.540088 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:05.540100 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:05.540121 131317 retry.go:30] will retry after 834.206799ms: missing components: kube-dns, kube-proxy I0724 22:06:06.459171 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:06.459205 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:06.459212 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:06.459221 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:06.459227 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:06.459233 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:06.459239 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:06.459244 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:06.459251 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:06.459305 131317 retry.go:30] will retry after 746.553905ms: missing components: kube-dns I0724 22:06:07.211453 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:07.211486 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady 
(containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:07.211493 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:07.211501 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:07.211529 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:07.211549 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:07.211555 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:07.211567 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:07.211575 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:07.211590 131317 retry.go:30] will retry after 987.362415ms: missing components: kube-dns I0724 22:06:08.204991 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:08.205027 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:08.205038 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:08.205050 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:08.205060 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:08.205071 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:08.205086 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:08.205095 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:08.205122 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:08.205137 131317 retry.go:30] will retry after 1.189835008s: missing components: kube-dns I0724 22:06:09.400808 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:09.400848 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:09.400856 131317 system_pods.go:89] 
"etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:09.400868 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:09.400890 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:09.400912 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:09.400917 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:09.400935 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:09.400951 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running I0724 22:06:09.400973 131317 retry.go:30] will retry after 1.677229867s: missing components: kube-dns I0724 22:06:11.085629 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:11.085660 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:11.085669 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:11.085678 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:06:11.085683 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:11.085689 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:11.085694 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:11.085700 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:11.085716 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:11.085727 131317 retry.go:30] will retry after 2.346016261s: missing components: kube-dns I0724 22:06:13.437996 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:13.438027 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:13.438035 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:13.438043 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:13.438049 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 
22:06:13.438055 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:13.438061 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:13.438066 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:13.438073 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:13.438085 131317 retry.go:30] will retry after 3.36678925s: missing components: kube-dns I0724 22:06:16.810133 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:16.810165 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:16.810172 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:16.810179 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:16.810184 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:16.810192 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:16.810200 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:16.810207 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:16.810238 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:16.810249 131317 retry.go:30] will retry after 3.11822781s: missing components: kube-dns I0724 22:06:19.934372 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:19.934405 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:19.934412 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:19.934420 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:19.934426 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:19.934432 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:19.934437 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:19.934443 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:19.934450 131317 system_pods.go:89] "storage-provisioner" 
[5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:19.934466 131317 retry.go:30] will retry after 4.276119362s: missing components: kube-dns I0724 22:06:24.832116 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:24.832149 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:24.832159 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:24.832169 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:24.832178 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:24.832188 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:24.832197 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:24.832213 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:24.832225 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:24.832244 131317 retry.go:30] will retry after 5.167232101s: missing components: kube-dns I0724 22:06:32.235250 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:32.235283 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:32.235290 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:32.235298 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:32.235304 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:32.235310 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:32.235316 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:32.235322 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:32.235350 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:32.235361 131317 retry.go:30] will retry after 6.994901864s: missing components: kube-dns I0724 22:06:39.235910 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:39.235950 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" 
[742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:39.235969 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:39.235979 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:39.235988 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:39.235997 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:39.236006 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:39.236144 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:39.236216 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:39.236234 131317 retry.go:30] will retry after 7.91826225s: missing components: kube-dns I0724 22:06:47.160112 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:47.160148 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:47.160158 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:47.160169 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:47.160178 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:47.160184 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:47.160189 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:47.160195 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:47.160202 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:06:47.160216 131317 retry.go:30] will retry after 9.953714808s: missing components: kube-dns I0724 22:06:57.120491 131317 system_pods.go:86] 8 kube-system pods found I0724 22:06:57.120527 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:06:57.120535 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:06:57.120545 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:06:57.120554 131317 system_pods.go:89] 
"kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:06:57.120564 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:06:57.120586 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:06:57.120592 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:06:57.120603 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running I0724 22:06:57.120628 131317 retry.go:30] will retry after 15.120437328s: missing components: kube-dns I0724 22:07:12.246213 131317 system_pods.go:86] 8 kube-system pods found I0724 22:07:12.246244 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:07:12.246250 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:07:12.246257 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:07:12.246262 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:07:12.246268 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:07:12.246273 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:07:12.246278 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:07:12.246285 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:07:12.246295 131317 retry.go:30] will retry after 14.90607158s: missing components: kube-dns I0724 22:07:27.159731 131317 system_pods.go:86] 8 kube-system pods found I0724 22:07:27.159767 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:07:27.159774 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:07:27.159784 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:07:27.159797 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:07:27.159811 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:07:27.159826 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:07:27.159841 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:07:27.159854 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready 
status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:07:27.159873 131317 retry.go:30] will retry after 18.465989061s: missing components: kube-dns I0724 22:07:45.632512 131317 system_pods.go:86] 8 kube-system pods found I0724 22:07:45.632557 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:07:45.632569 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:07:45.632578 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:07:45.632583 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:07:45.632589 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:07:45.632595 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:07:45.632601 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:07:45.632608 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:07:45.632629 131317 retry.go:30] will retry after 25.219510332s: missing components: kube-dns I0724 22:08:10.857571 131317 system_pods.go:86] 8 kube-system pods found I0724 22:08:10.857603 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:08:10.857611 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:08:10.857625 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:08:10.857635 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:08:10.857642 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:08:10.857648 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:08:10.857653 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:08:10.857668 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:08:10.857678 131317 retry.go:30] will retry after 35.078569648s: missing components: kube-dns I0724 22:08:45.942362 131317 system_pods.go:86] 8 kube-system pods found I0724 22:08:45.942409 131317 
system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:08:45.942417 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:08:45.942425 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:08:45.942430 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:08:45.942437 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:08:45.942442 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:08:45.942448 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:08:45.942455 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:08:45.942466 131317 retry.go:30] will retry after 50.027701973s: missing components: kube-dns I0724 22:09:35.977705 131317 system_pods.go:86] 8 kube-system pods found I0724 22:09:35.977753 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:09:35.977764 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:09:35.977778 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:09:35.977788 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:09:35.977799 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:09:35.977809 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:09:35.977819 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:09:35.977830 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:09:35.977845 131317 retry.go:30] will retry after 47.463338706s: missing components: kube-dns I0724 22:10:23.447792 131317 system_pods.go:86] 8 kube-system pods found I0724 22:10:23.447838 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:10:23.447848 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" 
[7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:10:23.447862 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:10:23.447886 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:10:23.447897 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:10:23.447912 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:10:23.447921 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:10:23.447938 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:10:23.447953 131317 retry.go:30] will retry after 53.912476906s: missing components: kube-dns I0724 22:11:17.366691 131317 system_pods.go:86] 8 kube-system pods found I0724 22:11:17.366728 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:11:17.366735 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:11:17.366742 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:11:17.366749 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:11:17.366755 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:11:17.366760 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:11:17.366766 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:11:17.366773 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:11:17.366784 131317 retry.go:30] will retry after 1m7.577191067s: missing components: kube-dns I0724 22:12:25.002282 131317 system_pods.go:86] 8 kube-system pods found I0724 22:12:25.002319 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:12:25.002327 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:12:25.002337 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 
22:12:25.002346 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:12:25.002353 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:12:25.002358 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:12:25.002364 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:12:25.002370 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:12:25.002388 131317 retry.go:30] will retry after 51.197479857s: missing components: kube-dns I0724 22:13:16.206122 131317 system_pods.go:86] 8 kube-system pods found I0724 22:13:16.206161 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:13:16.206174 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:13:16.206185 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:13:16.206192 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:13:16.206199 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:13:16.206204 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:13:16.206209 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:13:16.206224 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:13:16.206236 131317 retry.go:30] will retry after 1m10.96005039s: missing components: kube-dns I0724 22:14:27.172705 131317 system_pods.go:86] 8 kube-system pods found I0724 22:14:27.172746 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:14:27.172757 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:14:27.172766 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running I0724 22:14:27.172772 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:14:27.172779 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:14:27.172784 131317 
system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:14:27.172790 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:14:27.172797 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:14:27.172811 131317 retry.go:30] will retry after 1m5.901574973s: missing components: kube-dns I0724 22:15:33.079906 131317 system_pods.go:86] 8 kube-system pods found I0724 22:15:33.079950 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:15:33.079960 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:15:33.079974 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:15:33.079986 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:15:33.080004 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:15:33.080012 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:15:33.080027 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:15:33.080039 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:15:33.080059 131317 retry.go:30] will retry after 1m0.714609182s: missing components: kube-dns I0724 22:16:33.800811 131317 system_pods.go:86] 8 kube-system pods found I0724 22:16:33.800857 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:16:33.800868 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:16:33.800882 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:16:33.800895 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:16:33.800906 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:16:33.800916 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:16:33.800926 131317 system_pods.go:89] 
"kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:16:33.800938 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:16:33.800953 131317 retry.go:30] will retry after 45.849092499s: missing components: kube-dns I0724 22:17:19.657415 131317 system_pods.go:86] 8 kube-system pods found I0724 22:17:19.657453 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:17:19.657461 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:17:19.657470 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:17:19.657478 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:17:19.657484 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:17:19.657490 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:17:19.657495 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:17:19.657502 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:17:19.657515 131317 retry.go:30] will retry after 49.749848332s: missing components: kube-dns I0724 22:18:09.412431 131317 system_pods.go:86] 8 kube-system pods found I0724 22:18:09.412473 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:18:09.412481 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:18:09.412490 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:18:09.412499 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:18:09.412506 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:18:09.412511 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:18:09.412520 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:18:09.412530 131317 system_pods.go:89] "storage-provisioner" 
[5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:18:09.412553 131317 retry.go:30] will retry after 1m3.217603186s: missing components: kube-dns I0724 22:19:12.637093 131317 system_pods.go:86] 8 kube-system pods found I0724 22:19:12.637132 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:19:12.637140 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:19:12.637151 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:19:12.637158 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:19:12.637164 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:19:12.637170 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:19:12.637175 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:19:12.637182 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:19:12.637192 131317 retry.go:30] will retry after 1m14.257248566s: missing components: kube-dns I0724 22:20:26.900856 131317 system_pods.go:86] 8 kube-system pods found I0724 22:20:26.900891 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:20:26.900898 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:20:26.900907 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:20:26.900916 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:20:26.900923 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:20:26.900928 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:20:26.900933 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:20:26.900940 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady 
(containers with unready status: [storage-provisioner]) I0724 22:20:26.900957 131317 retry.go:30] will retry after 47.383608701s: missing components: kube-dns I0724 22:21:14.290212 131317 system_pods.go:86] 8 kube-system pods found I0724 22:21:14.290246 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:21:14.290253 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:21:14.290262 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:21:14.290269 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:21:14.290275 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:21:14.290281 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:21:14.290286 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:21:14.290293 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:21:14.290303 131317 retry.go:30] will retry after 1m2.844257931s: missing components: kube-dns I0724 22:22:17.140109 131317 system_pods.go:86] 8 kube-system pods found I0724 22:22:17.140144 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:22:17.140151 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:22:17.140160 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:22:17.140169 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:22:17.140175 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:22:17.140180 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:22:17.140186 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:22:17.140193 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:22:17.140203 131317 retry.go:30] will retry after 46.773619539s: missing components: kube-dns I0724 
22:23:03.919922 131317 system_pods.go:86] 8 kube-system pods found I0724 22:23:03.919959 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:23:03.919966 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:23:03.919980 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:23:03.919992 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:23:03.919998 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:23:03.920003 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:23:03.920010 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:23:03.920016 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:23:03.920027 131317 retry.go:30] will retry after 1m5.760737621s: missing components: kube-dns I0724 22:24:09.686676 131317 system_pods.go:86] 8 kube-system pods found I0724 22:24:09.686712 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:24:09.686719 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:24:09.686728 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:24:09.686744 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:24:09.686750 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:24:09.686756 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:24:09.686761 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:24:09.686776 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:24:09.686787 131317 retry.go:30] will retry after 54.04568043s: missing components: kube-dns I0724 22:25:03.737573 131317 system_pods.go:86] 8 kube-system pods found I0724 22:25:03.737611 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" 
[742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:25:03.737618 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:25:03.737628 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:25:03.737634 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:25:03.737640 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:25:03.737646 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:25:03.737651 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:25:03.737658 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:25:03.737668 131317 retry.go:30] will retry after 50.197987145s: missing components: kube-dns I0724 22:25:53.945414 131317 system_pods.go:86] 8 kube-system pods found I0724 22:25:53.945446 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:25:53.945453 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:25:53.945463 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:25:53.945515 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:25:53.945522 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:25:53.945527 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:25:53.945533 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:25:53.945541 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:25:53.945560 131317 retry.go:30] will retry after 1m1.23299565s: missing components: kube-dns I0724 22:26:55.184425 131317 system_pods.go:86] 8 kube-system pods found I0724 22:26:55.184487 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with 
unready status: [coredns]) I0724 22:26:55.184495 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:26:55.184513 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:26:55.184522 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:26:55.184534 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:26:55.184539 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:26:55.184544 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:26:55.184551 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:26:55.184567 131317 retry.go:30] will retry after 1m1.32466719s: missing components: kube-dns I0724 22:27:56.546160 131317 system_pods.go:86] 8 kube-system pods found I0724 22:27:56.546195 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:27:56.546202 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:27:56.546211 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:27:56.546218 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:27:56.546224 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:27:56.546229 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:27:56.546235 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:27:56.546242 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:27:56.546253 131317 retry.go:30] will retry after 53.355228654s: missing components: kube-dns I0724 22:28:49.906756 131317 system_pods.go:86] 8 kube-system pods found I0724 22:28:49.906790 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:28:49.906797 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 
22:28:49.906806 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:28:49.906812 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:28:49.906819 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:28:49.906831 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:28:49.906853 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:28:49.906860 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:28:49.906870 131317 retry.go:30] will retry after 57.694566047s: missing components: kube-dns I0724 22:29:47.606565 131317 system_pods.go:86] 8 kube-system pods found I0724 22:29:47.606600 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:29:47.606607 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:29:47.606620 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:29:47.606630 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:29:47.606641 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:29:47.606649 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:29:47.606656 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:29:47.606663 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:29:47.606673 131317 retry.go:30] will retry after 1m0.917571461s: missing components: kube-dns I0724 22:30:48.529567 131317 system_pods.go:86] 8 kube-system pods found I0724 22:30:48.529681 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:30:48.529693 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:30:48.529710 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: 
[kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:30:48.529721 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:30:48.529727 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:30:48.529734 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:30:48.529742 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:30:48.529749 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:30:48.529767 131317 retry.go:30] will retry after 52.606215015s: missing components: kube-dns I0724 22:31:41.141214 131317 system_pods.go:86] 8 kube-system pods found I0724 22:31:41.141250 131317 system_pods.go:89] "coredns-66bff467f8-dqmpm" [742bcecd-250a-48d0-bb34-f541a7635647] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:31:41.141257 131317 system_pods.go:89] "etcd-kindnet-20200724220311-14997" [7c3295f7-8a2e-46e7-a751-1f1ee8c811c3] Running I0724 22:31:41.141267 131317 system_pods.go:89] "kindnet-dzph2" [a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:31:41.141274 131317 system_pods.go:89] "kube-apiserver-kindnet-20200724220311-14997" [f3628be1-ff44-43cc-9125-5bbfdaae52ea] Running I0724 22:31:41.141280 131317 system_pods.go:89] "kube-controller-manager-kindnet-20200724220311-14997" [45a54775-4ef5-44a8-bd23-4ac90077a621] Running I0724 22:31:41.141285 131317 system_pods.go:89] "kube-proxy-74jgx" [db724ee5-afc9-4d13-92fb-43ba0a9bcd4b] Running I0724 22:31:41.141291 131317 system_pods.go:89] "kube-scheduler-kindnet-20200724220311-14997" [a2cb645e-c7e0-4678-9e5b-56be917c4666] Running I0724 22:31:41.141298 131317 system_pods.go:89] "storage-provisioner" [5bc5dc2c-70ae-4a9d-8c50-1ae72813f060] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:31:41.141396 131317 exit.go:58] WithError(failed to start node)=startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from: goroutine 1 [running]: runtime/debug.Stack(0x0, 0x0, 0x100000000000000) /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc000a628e0) /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34 k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc0003998c0, 0x2, 0xb) /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505 github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc000398790, 0xb, 0xb, 0x2cd0820, 0xc000398790) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d 
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc0005de5a0) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349 github.com/spf13/cobra.(*Command).Execute(...) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887 k8s.io/minikube/cmd/minikube/cmd.Execute() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c main.main() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f W0724 22:31:41.141534 131317 out.go:249] failed to start node: startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * X failed to start node: startup failed: wait 25m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * * minikube is exiting due to an error. If the above message is not useful, open an issue: - https://github.com/kubernetes/minikube/issues/new/choose ** /stderr ** net_test.go:82: failed start: exit status 70 === CONT TestNetworkPlugins/group/kindnet net_test.go:204: "kindnet" test finished in 31m32.987195466s, failed=true net_test.go:205: *** TestNetworkPlugins/group/kindnet FAILED at 2020-07-24 22:31:41.164108471 +0000 UTC m=+3331.288472956 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestNetworkPlugins/group/kindnet]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect kindnet-20200724220311-14997 helpers_test.go:228: (dbg) docker inspect kindnet-20200724220311-14997: -- stdout -- [ { "Id": "5fbdedd438321d0be71f9ae4d11af61e3737c8bf5c5536f112f38c6b16b4eeda", "Created": "2020-07-24T22:04:46.489887099Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 155465, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:04:47.304069028Z", "FinishedAt": "0001-01-01T00:00:00Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/5fbdedd438321d0be71f9ae4d11af61e3737c8bf5c5536f112f38c6b16b4eeda/resolv.conf", "HostnamePath": "/var/lib/docker/containers/5fbdedd438321d0be71f9ae4d11af61e3737c8bf5c5536f112f38c6b16b4eeda/hostname", "HostsPath": "/var/lib/docker/containers/5fbdedd438321d0be71f9ae4d11af61e3737c8bf5c5536f112f38c6b16b4eeda/hosts", "LogPath": "/var/lib/docker/containers/5fbdedd438321d0be71f9ae4d11af61e3737c8bf5c5536f112f38c6b16b4eeda/5fbdedd438321d0be71f9ae4d11af61e3737c8bf5c5536f112f38c6b16b4eeda-json.log", "Name": "/kindnet-20200724220311-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "kindnet-20200724220311-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, 
"GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 1887436800, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/eb15579aa5cfd1fcec016f54ccb8370d4bd8652c463639728c609bdb7f6e3b09-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/eb15579aa5cfd1fcec016f54ccb8370d4bd8652c463639728c609bdb7f6e3b09/merged", "UpperDir": 
"/var/lib/docker/overlay2/eb15579aa5cfd1fcec016f54ccb8370d4bd8652c463639728c609bdb7f6e3b09/diff", "WorkDir": "/var/lib/docker/overlay2/eb15579aa5cfd1fcec016f54ccb8370d4bd8652c463639728c609bdb7f6e3b09/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "kindnet-20200724220311-14997", "Source": "/var/lib/docker/volumes/kindnet-20200724220311-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "kindnet-20200724220311-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "kindnet-20200724220311-14997", "name.minikube.sigs.k8s.io": "kindnet-20200724220311-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "d10d1aac88278c4e9d02afe2278188fffc70c0b3cac54f5afabee0e8f9e0b93a", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32872" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32871" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32870" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32869" } ] }, "SandboxKey": "/var/run/docker/netns/d10d1aac8827", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "0a4f25982c8dc84e277565384f3cb4b05703a62764e919d0493853f6f6a04b88", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.11", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:0b", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "0a4f25982c8dc84e277565384f3cb4b05703a62764e919d0493853f6f6a04b88", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.11", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:0b", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p kindnet-20200724220311-14997 -n kindnet-20200724220311-14997 helpers_test.go:237: <<< TestNetworkPlugins/group/kindnet FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestNetworkPlugins/group/kindnet]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p kindnet-20200724220311-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p kindnet-20200724220311-14997 logs -n 25: (2.247024093s) helpers_test.go:245: TestNetworkPlugins/group/kindnet logs: -- stdout -- * ==> Docker <== * -- Logs begin at Fri 2020-07-24 22:04:58 UTC, end at Fri 2020-07-24 22:31:42 UTC. 
-- * Jul 24 22:30:26 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:26.603747864Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:30 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:30.069986500Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:32 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:32.938400890Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:35 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:35.952000051Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:39 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:39.915316002Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:44 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:44.507476625Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:48 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:48.401034950Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:49 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:49.153653469Z" level=warning msg="Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap." * Jul 24 22:30:51 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:51.450843189Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:54 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:54.507916854Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:30:57 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:30:57.275992173Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:05 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:05.144562423Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:08 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:08.743431042Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:09 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:09.346415768Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:09 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:09.346507774Z" level=warning msg="14812958b0774b373fd86b7ff8926419c523179c7de133558416a39a6ebcc849 cleanup: failed to unmount IPC: umount /var/lib/docker/containers/14812958b0774b373fd86b7ff8926419c523179c7de133558416a39a6ebcc849/mounts/shm, flags: 0x2: no such file or directory" * Jul 24 22:31:11 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:11.387301044Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete 
type="*events.TaskDelete" * Jul 24 22:31:14 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:14.595982133Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:17 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:17.982385437Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:21 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:21.037366939Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:23 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:23.436697996Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:29 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:29.011015182Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:31 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:31.588905158Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:35 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:35.031089039Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:38 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:38.152523331Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * Jul 24 22:31:40 kindnet-20200724220311-14997 dockerd[366]: time="2020-07-24T22:31:40.878495635Z" level=info msg="ignoring event" module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 14812958b0774 2186a1a396deb 53 seconds ago Exited kindnet-cni 9 8f68346fd924b * 1b518c536e9d6 4689081edb103 4 minutes ago Exited storage-provisioner 9 a3920045f29ba * f0d49cb960427 3439b7546f29b 25 minutes ago Running kube-proxy 0 b4d8c0c9cfe19 * 240fbcdf3bf25 da26705ccb4b5 25 minutes ago Running kube-controller-manager 1 e4ff21c26de39 * ac1240b804a5d 76216c34ed0c7 26 minutes ago Running kube-scheduler 0 cd4f9bc6c6719 * 3e208158924ec 7e28efa976bd1 26 minutes ago Running kube-apiserver 0 1d3732fb2704c * af9cc157d7800 303ce5db0e90d 26 minutes ago Running etcd 0 2bd34137e4e2a * dab663366f9f2 da26705ccb4b5 26 minutes ago Exited kube-controller-manager 0 e4ff21c26de39 * * ==> describe nodes <== * Name: kindnet-20200724220311-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=kindnet-20200724220311-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=kindnet-20200724220311-14997 * minikube.k8s.io/updated_at=2020_07_24T22_05_48_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:05:33 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: kindnet-20200724220311-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 
2020 22:31:40 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:31:21 +0000 Fri, 24 Jul 2020 22:05:33 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:31:21 +0000 Fri, 24 Jul 2020 22:05:33 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:31:21 +0000 Fri, 24 Jul 2020 22:05:33 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:31:21 +0000 Fri, 24 Jul 2020 22:05:56 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.11 * Hostname: kindnet-20200724220311-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 625f6dd6709d4a4fb99e44bf8eb02822 * System UUID: 4003bf13-de4b-41a4-8639-956fa0bbb5e7 * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: docker://19.3.2 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (8 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * kube-system coredns-66bff467f8-dqmpm 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 25m * kube-system etcd-kindnet-20200724220311-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system kindnet-dzph2 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 25m * kube-system kube-apiserver-kindnet-20200724220311-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system kube-controller-manager-kindnet-20200724220311-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system kube-proxy-74jgx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system kube-scheduler-kindnet-20200724220311-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 26m (x7 over 26m) kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 26m (x7 over 26m) kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 26m (x7 over 26m) kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 26m kubelet, kindnet-20200724220311-14997 Updated Node Allocatable limit across pods * Normal Starting 25m kubelet, kindnet-20200724220311-14997 Starting kubelet. 
* Warning SystemOOM 25m kubelet, kindnet-20200724220311-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 25m kubelet, kindnet-20200724220311-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 25m kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 25m kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 25m kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 25m kubelet, kindnet-20200724220311-14997 Updated Node Allocatable limit across pods * Normal NodeReady 25m kubelet, kindnet-20200724220311-14997 Node kindnet-20200724220311-14997 status is now: NodeReady * Warning readOnlySysFS 25m kube-proxy, kindnet-20200724220311-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kube-proxy, kindnet-20200724220311-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [af9cc157d780] <== * 2020-07-24 22:28:16.009775 W | etcdserver: failed to revoke 29fd7382dc13c289 ("etcdserver: request timed out") * 2020-07-24 22:28:16.220407 W | wal: sync duration of 12.557362452s, expected less than 1s * 2020-07-24 22:28:16.223083 W | etcdserver: failed to revoke 29fd7382dc13c289 ("lease not found") * 2020-07-24 22:28:16.223294 W | etcdserver: read-only range request "key:\"/registry/pods\" range_end:\"/registry/podt\" count_only:true " with result "range_response_count:0 size:7" took too long (9.688605114s) to execute * 2020-07-24 22:28:16.223326 W | etcdserver: read-only range request "key:\"/registry/ingress\" range_end:\"/registry/ingrest\" count_only:true " with result "range_response_count:0 size:5" took too long (10.671599548s) to execute * 2020-07-24 22:28:16.223446 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (9.050167038s) to execute * 2020-07-24 22:28:16.224726 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/etcd-kindnet-20200724220311-14997.1624cf9234dae31c\" " with result "range_response_count:1 size:833" took too long (11.368572788s) to execute * 2020-07-24 22:28:16.224757 W | etcdserver: read-only range request "key:\"/registry/podsecuritypolicy\" range_end:\"/registry/podsecuritypolicz\" count_only:true " with result "range_response_count:0 size:5" took too long (6.2476157s) to execute * 2020-07-24 22:28:16.224902 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (9.119697082s) to execute * 2020-07-24 22:28:16.225031 W | etcdserver: read-only range request "key:\"/registry/minions\" range_end:\"/registry/miniont\" count_only:true " with result "range_response_count:0 size:7" took too long (8.802843809s) to execute * 2020-07-24 22:28:16.291316 W | etcdserver: read-only range request "key:\"/registry/ingress\" range_end:\"/registry/ingrest\" count_only:true " with result "range_response_count:0 size:5" took too long (1.428788741s) to execute * 2020-07-24 22:28:16.291410 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.631331879s) to execute * 2020-07-24 22:28:16.291451 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (1.303405197s) to execute * 2020-07-24 22:28:19.453001 W | etcdserver: read-only range request "key:\"/registry/cronjobs/\" range_end:\"/registry/cronjobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (3.226633718s) to execute * 2020-07-24 22:28:19.453175 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/kindnet-20200724220311-14997\" " with result "range_response_count:1 size:681" took too long (3.227668891s) to execute * 2020-07-24 22:28:19.453414 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.877801492s) to execute * 2020-07-24 22:28:20.692845 W | wal: sync duration of 4.117395538s, expected less than 1s * 2020-07-24 22:28:20.693256 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:610" took too long (4.399603719s) to execute * 2020-07-24 
22:28:20.715965 W | etcdserver: read-only range request "key:\"/registry/apiregistration.k8s.io/apiservices\" range_end:\"/registry/apiregistration.k8s.io/apiservicet\" count_only:true " with result "range_response_count:0 size:7" took too long (682.029763ms) to execute * 2020-07-24 22:28:20.715998 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (3.541944307s) to execute * 2020-07-24 22:28:20.716052 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-apiserver-kindnet-20200724220311-14997.1624cfe1dd755ba2\" " with result "range_response_count:1 size:874" took too long (1.259988769s) to execute * 2020-07-24 22:28:20.716106 W | etcdserver: read-only range request "key:\"/registry/limitranges\" range_end:\"/registry/limitranget\" count_only:true " with result "range_response_count:0 size:5" took too long (2.472884253s) to execute * 2020-07-24 22:30:29.303965 I | mvcc: store.index: compact 801 * 2020-07-24 22:30:29.304541 I | mvcc: finished scheduled compaction at 801 (took 289.82µs) * 2020-07-24 22:31:00.522180 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (1.003358584s) to execute * * ==> kernel <== * 22:31:43 up 59 min, 0 users, load average: 7.16, 8.18, 8.09 * Linux kindnet-20200724220311-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [3e208158924e] <== * I0724 22:28:19.453693 1 trace.go:116] Trace[2026227631]: "Get" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kindnet-20200724220311-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.11 (started: 2020-07-24 22:28:16.225128806 +0000 UTC m=+1368.584571821) (total time: 3.22852545s): * Trace[2026227631]: [3.228463746s] [3.228447645s] About to write a response * I0724 22:28:19.453809 1 trace.go:116] Trace[1695875259]: "List" url:/apis/batch/v1beta1/cronjobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.11 (started: 2020-07-24 22:28:16.226069871 +0000 UTC m=+1368.585512886) (total time: 3.227707994s): * Trace[1695875259]: [3.227641889s] [3.227630188s] Listing from storage done * I0724 22:28:19.453837 1 trace.go:116] Trace[1263090756]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:28:04.855712459 +0000 UTC m=+1357.215155374) (total time: 14.598094308s): * Trace[1263090756]: [11.369375244s] [11.369375244s] initial value restored * Trace[1263090756]: [14.598050104s] [3.161814097s] Transaction committed * I0724 22:28:19.453946 1 trace.go:116] Trace[1259252359]: "Patch" url:/api/v1/namespaces/kube-system/events/etcd-kindnet-20200724220311-14997.1624cf9234dae31c,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.11 (started: 2020-07-24 22:28:04.855611652 +0000 UTC m=+1357.215054667) (total time: 14.598308322s): * Trace[1259252359]: [11.369477551s] [11.369424548s] About to apply patch * Trace[1259252359]: [14.598261919s] [3.228395641s] Object stored in database * I0724 22:28:20.693807 1 trace.go:116] Trace[1047303435]: "Get" url:/api/v1/namespaces/default/services/kubernetes,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 
22:28:16.293351763 +0000 UTC m=+1368.652794778) (total time: 4.400412976s): * Trace[1047303435]: [4.400355972s] [4.400349671s] About to write a response * I0724 22:28:20.693810 1 trace.go:116] Trace[597738367]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:28:19.455201964 +0000 UTC m=+1371.814644979) (total time: 1.238577976s): * Trace[597738367]: [1.238553974s] [1.237479799s] Transaction committed * I0724 22:28:20.694073 1 trace.go:116] Trace[889883406]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kindnet-20200724220311-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.11 (started: 2020-07-24 22:28:19.455037452 +0000 UTC m=+1371.814480367) (total time: 1.239000806s): * Trace[889883406]: [1.238948402s] [1.238848695s] Object stored in database * I0724 22:28:20.811074 1 trace.go:116] Trace[234249838]: "GuaranteedUpdate etcd3" type:*core.Event (started: 2020-07-24 22:28:19.455390277 +0000 UTC m=+1371.814833292) (total time: 1.35564434s): * Trace[234249838]: [1.261249057s] [1.261249057s] initial value restored * I0724 22:28:20.811225 1 trace.go:116] Trace[2088813794]: "Patch" url:/api/v1/namespaces/kube-system/events/kube-apiserver-kindnet-20200724220311-14997.1624cfe1dd755ba2,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.11 (started: 2020-07-24 22:28:19.455276769 +0000 UTC m=+1371.814719784) (total time: 1.355908158s): * Trace[2088813794]: [1.261364465s] [1.261324062s] About to apply patch * Trace[2088813794]: [1.355837753s] [94.019156ms] Object stored in database * I0724 22:31:00.522827 1 trace.go:116] Trace[456102867]: "List etcd3" key:/jobs,resourceVersion:,limit:500,continue: (started: 2020-07-24 22:30:59.518393471 +0000 UTC m=+1531.877836486) (total time: 1.00440235s): * Trace[456102867]: [1.00440235s] [1.00440235s] END * I0724 22:31:00.522926 1 trace.go:116] Trace[383749159]: "List" url:/apis/batch/v1/jobs,user-agent:kube-controller-manager/v1.18.3 (linux/amd64) kubernetes/2e7996e/system:serviceaccount:kube-system:cronjob-controller,client:172.17.0.11 (started: 2020-07-24 22:30:59.518372269 +0000 UTC m=+1531.877815184) (total time: 1.004535359s): * Trace[383749159]: [1.004479356s] [1.004463255s] Listing from storage done * * ==> kube-controller-manager [240fbcdf3bf2] <== * I0724 22:06:02.647171 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"c92445a8-444c-4983-a37b-31c41bab8c59", APIVersion:"apps/v1", ResourceVersion:"315", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: coredns-66bff467f8-dqmpm * E0724 22:06:02.653042 1 clusterroleaggregation_controller.go:181] admin failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "admin": the object has been modified; please apply your changes to the latest version and try again * I0724 22:06:02.837651 1 shared_informer.go:230] Caches are synced for taint * I0724 22:06:02.837942 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:06:02.837958 1 taint_manager.go:187] Starting NoExecuteTaintManager * W0724 22:06:02.838017 1 node_lifecycle_controller.go:1048] Missing timestamp for Node kindnet-20200724220311-14997. Assuming now as a timestamp. * I0724 22:06:02.838057 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. 
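The clusterroleaggregation_controller error above ("the object has been modified; please apply your changes to the latest version and try again") is the apiserver's optimistic-concurrency conflict on update; controllers recover by re-reading the latest object and retrying. A self-contained sketch of that retry loop, with the conflict simulated rather than coming from a real client-go call:
-- sketch --
package main

import (
	"errors"
	"fmt"
	"time"
)

// errConflict stands in for the 409 Conflict the apiserver returns when an
// update races another writer, as in the clusterrole "admin" error above.
var errConflict = errors.New("the object has been modified")

// updateAdmin simulates an update that loses the race twice before the
// re-read copy goes through. A real controller would GET the latest object
// and reapply its change on each attempt.
func updateAdmin(attempt int) error {
	if attempt < 2 {
		return errConflict
	}
	return nil
}

func main() {
	for attempt := 0; ; attempt++ {
		err := updateAdmin(attempt)
		if !errors.Is(err, errConflict) {
			fmt.Printf("done after %d attempt(s), err: %v\n", attempt+1, err)
			return
		}
		time.Sleep(50 * time.Millisecond) // brief backoff before retrying
	}
}
-- /sketch --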
* I0724 22:06:02.838114 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"kindnet-20200724220311-14997", UID:"f3c6af39-96bb-4075-b321-38acbe61d32f", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node kindnet-20200724220311-14997 event: Registered Node kindnet-20200724220311-14997 in Controller * I0724 22:06:02.877629 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:06:02.890110 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kindnet", UID:"cc4148f3-26fe-4c23-9219-ab209a808ddf", APIVersion:"apps/v1", ResourceVersion:"249", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kindnet-dzph2 * I0724 22:06:02.899835 1 event.go:278] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"a2447b5b-22c8-4008-90f6-382a523b59fc", APIVersion:"apps/v1", ResourceVersion:"193", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-74jgx * I0724 22:06:02.962732 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:06:02.962763 1 disruption.go:339] Sending events to api server. * I0724 22:06:02.974390 1 shared_informer.go:230] Caches are synced for endpoint * I0724 22:06:02.977355 1 shared_informer.go:230] Caches are synced for endpoint_slice * I0724 22:06:03.008491 1 shared_informer.go:230] Caches are synced for stateful set * I0724 22:06:03.051220 1 shared_informer.go:230] Caches are synced for bootstrap_signer * I0724 22:06:03.076190 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:06:03.080415 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:06:03.084899 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:06:03.136705 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:06:03.136731 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage
* I0724 22:06:03.174741 1 shared_informer.go:230] Caches are synced for attach detach
* I0724 22:06:03.256671 1 event.go:278] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"coredns", UID:"a7290002-d18b-4a18-8c56-dedbf7932079", APIVersion:"apps/v1", ResourceVersion:"371", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled down replica set coredns-66bff467f8 to 1
* I0724 22:06:03.276192 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"coredns-66bff467f8", UID:"c92445a8-444c-4983-a37b-31c41bab8c59", APIVersion:"apps/v1", ResourceVersion:"372", FieldPath:""}): type: 'Normal' reason: 'SuccessfulDelete' Deleted pod: coredns-66bff467f8-t5bzk
* 
* ==> kube-controller-manager [dab663366f9f] <==
* I0724 22:05:28.245223 1 serving.go:313] Generated self-signed cert in-memory
* I0724 22:05:29.790626 1 controllermanager.go:161] Version: v1.18.3
* I0724 22:05:29.791727 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt
* I0724 22:05:29.791842 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt
* I0724 22:05:29.792252 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257
* I0724 22:05:29.835557 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* I0724 22:05:29.836569 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252
* F0724 22:05:43.274997 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: an error on the server ("[+]ping ok\n[+]log ok\n[+]etcd ok\n[+]poststarthook/start-kube-apiserver-admission-initializer ok\n[+]poststarthook/generic-apiserver-start-informers ok\n[+]poststarthook/start-apiextensions-informers ok\n[+]poststarthook/start-apiextensions-controllers ok\n[+]poststarthook/crd-informer-synced ok\n[+]poststarthook/bootstrap-controller ok\n[-]poststarthook/rbac/bootstrap-roles failed: reason withheld\n[+]poststarthook/scheduling/bootstrap-system-priority-classes ok\n[+]poststarthook/start-cluster-authentication-info-controller ok\n[+]poststarthook/start-kube-aggregator-informers ok\n[+]poststarthook/apiservice-registration-controller ok\n[+]poststarthook/apiservice-status-available-controller ok\n[+]poststarthook/kube-apiserver-autoregistration ok\n[+]autoregister-completion ok\n[+]poststarthook/apiservice-openapi-controller ok\nhealthz check failed") has prevented the request from succeeding
* 
* ==> kube-proxy [f0d49cb96042] <==
* W0724 22:06:04.729267 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy
* I0724 22:06:04.737369 1 node.go:136] Successfully retrieved node IP: 172.17.0.11
* I0724 22:06:04.737421 1 server_others.go:186] Using iptables Proxier.
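kube-proxy then hits the sysfs problem already flagged by the readOnlySysFS node event earlier: inside the container /sys is mounted read-only, so conntrack limits cannot be raised. A minimal Go sketch (illustrative only, not part of minikube) that performs the same mount-option check:
-- sketch --
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Checks whether /sys is mounted read-only by reading /proc/mounts, the
// same condition behind the readOnlySysFS event and the "sysfs is not
// writable" error kube-proxy logs below.
func main() {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		// /proc/mounts fields: device mountpoint fstype options dump pass
		fields := strings.Fields(sc.Text())
		if len(fields) >= 4 && fields[1] == "/sys" {
			ro := false
			for _, o := range strings.Split(fields[3], ",") {
				if o == "ro" {
					ro = true
				}
			}
			fmt.Printf("/sys read-only: %v (options: %s)\n", ro, fields[3])
		}
	}
}
-- /sketch --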
* I0724 22:06:04.737894 1 server.go:583] Version: v1.18.3 * I0724 22:06:04.738534 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:06:04.738935 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:06:04.739279 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:06:04.739367 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:06:04.739568 1 config.go:315] Starting service config controller * I0724 22:06:04.739578 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:06:04.739609 1 config.go:133] Starting endpoints config controller * I0724 22:06:04.739622 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:06:04.839738 1 shared_informer.go:230] Caches are synced for endpoints config * I0724 22:06:04.839812 1 shared_informer.go:230] Caches are synced for service config * * ==> kube-scheduler [ac1240b804a5] <== * E0724 22:05:34.430696 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:05:34.609362 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:34.650059 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:05:34.666157 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:05:34.698833 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:05:34.851804 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:05:34.870317 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:05:36.251401 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope * E0724 22:05:36.541860 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the 
cluster scope * E0724 22:05:36.617283 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:05:36.883966 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:05:37.137297 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:05:37.308856 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:37.374978 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" * E0724 22:05:37.566254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:37.570418 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * E0724 22:05:40.958559 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:05:40.981461 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:05:40.993361 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:41.184871 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:05:41.383969 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * E0724 22:05:41.686522 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope * I0724 22:05:43.448001 1 
shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * E0724 22:06:02.659243 1 factory.go:503] pod: kube-system/coredns-66bff467f8-t5bzk is already present in the active queue * E0724 22:06:02.781854 1 factory.go:503] pod: kube-system/coredns-66bff467f8-dqmpm is already present in the active queue * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:04:58 UTC, end at Fri 2020-07-24 22:31:43 UTC. -- * Jul 24 22:31:38 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:38.224093 2555 pod_workers.go:191] Error syncing pod 742bcecd-250a-48d0-bb34-f541a7635647 ("coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"f95ff203f8096577e9bc5ffa661abd458b83ac5a8a375fc735cca841a64fbd1e\" network for pod \"coredns-66bff467f8-dqmpm\": networkPlugin cni failed to set up pod \"coredns-66bff467f8-dqmpm_kube-system\" network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied, failed to clean up sandbox container \"f95ff203f8096577e9bc5ffa661abd458b83ac5a8a375fc735cca841a64fbd1e\" network for pod \"coredns-66bff467f8-dqmpm\": networkPlugin cni failed to teardown pod \"coredns-66bff467f8-dqmpm_kube-system\" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.175 -j CNI-af9ffa4afa84e39f295c2961 -m comment --comment name: \"crio-bridge\" id: \"f95ff203f8096577e9bc5ffa661abd458b83ac5a8a375fc735cca841a64fbd1e\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-af9ffa4afa84e39f295c2961':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n]" * Jul 24 22:31:38 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:38.667662 2555 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-dqmpm_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "f95ff203f8096577e9bc5ffa661abd458b83ac5a8a375fc735cca841a64fbd1e" * Jul 24 22:31:38 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:38.679600 2555 pod_container_deletor.go:77] Container "f95ff203f8096577e9bc5ffa661abd458b83ac5a8a375fc735cca841a64fbd1e" not found in pod's containers * Jul 24 22:31:38 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:38.681217 2555 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "f95ff203f8096577e9bc5ffa661abd458b83ac5a8a375fc735cca841a64fbd1e" * Jul 24 22:31:40 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:40.685610 2555 cni.go:364] Error adding kube-system_coredns-66bff467f8-dqmpm/dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c to network bridge/crio-bridge: failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:31:40 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:40.772427 2555 cni.go:385] Error deleting kube-system_coredns-66bff467f8-dqmpm/dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c from network bridge/crio-bridge: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.176 -j 
CNI-3310bdd72e0133ba9d2ae78a -m comment --comment name: "crio-bridge" id: "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-3310bdd72e0133ba9d2ae78a':No such file or directory * Jul 24 22:31:40 kindnet-20200724220311-14997 kubelet[2555]: Try `iptables -h' or 'iptables --help' for more information. * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:41.209463 2555 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = [failed to set up sandbox container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" network for pod "coredns-66bff467f8-dqmpm": networkPlugin cni failed to set up pod "coredns-66bff467f8-dqmpm_kube-system" network: failed to set bridge addr: could not add IP address to "cni0": permission denied, failed to clean up sandbox container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" network for pod "coredns-66bff467f8-dqmpm": networkPlugin cni failed to teardown pod "coredns-66bff467f8-dqmpm_kube-system" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.176 -j CNI-3310bdd72e0133ba9d2ae78a -m comment --comment name: "crio-bridge" id: "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-3310bdd72e0133ba9d2ae78a':No such file or directory * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: Try `iptables -h' or 'iptables --help' for more information. * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: ] * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:41.209535 2555 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" network for pod "coredns-66bff467f8-dqmpm": networkPlugin cni failed to set up pod "coredns-66bff467f8-dqmpm_kube-system" network: failed to set bridge addr: could not add IP address to "cni0": permission denied, failed to clean up sandbox container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" network for pod "coredns-66bff467f8-dqmpm": networkPlugin cni failed to teardown pod "coredns-66bff467f8-dqmpm_kube-system" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.176 -j CNI-3310bdd72e0133ba9d2ae78a -m comment --comment name: "crio-bridge" id: "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-3310bdd72e0133ba9d2ae78a':No such file or directory * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: Try `iptables -h' or 'iptables --help' for more information. 
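Each teardown failure above is iptables exiting with status 2 because the CNI-* chain named by the -D rule no longer exists when the delete runs. A small sketch (needs root and an iptables binary on PATH; not from this test suite) that lists which CNI jumps are still installed in the nat POSTROUTING chain:
-- sketch --
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// Dumps nat POSTROUTING rules and prints any jump to a CNI-* chain, which
// is what the failing "-D POSTROUTING ... -j CNI-..." deletes above refer to.
func main() {
	out, err := exec.Command("iptables", "-t", "nat", "-S", "POSTROUTING").CombinedOutput()
	if err != nil {
		fmt.Println("iptables failed:", err, string(out))
		return
	}
	for _, line := range strings.Split(string(out), "\n") {
		if strings.Contains(line, "-j CNI-") {
			fmt.Println("CNI jump still installed:", line)
		}
	}
}
-- /sketch --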
* Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: ] * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:41.209554 2555 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" network for pod "coredns-66bff467f8-dqmpm": networkPlugin cni failed to set up pod "coredns-66bff467f8-dqmpm_kube-system" network: failed to set bridge addr: could not add IP address to "cni0": permission denied, failed to clean up sandbox container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" network for pod "coredns-66bff467f8-dqmpm": networkPlugin cni failed to teardown pod "coredns-66bff467f8-dqmpm_kube-system" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.176 -j CNI-3310bdd72e0133ba9d2ae78a -m comment --comment name: "crio-bridge" id: "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-3310bdd72e0133ba9d2ae78a':No such file or directory * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: Try `iptables -h' or 'iptables --help' for more information. * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: ] * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:41.209628 2555 pod_workers.go:191] Error syncing pod 742bcecd-250a-48d0-bb34-f541a7635647 ("coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c\" network for pod \"coredns-66bff467f8-dqmpm\": networkPlugin cni failed to set up pod \"coredns-66bff467f8-dqmpm_kube-system\" network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied, failed to clean up sandbox container \"dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c\" network for pod \"coredns-66bff467f8-dqmpm\": networkPlugin cni failed to teardown pod \"coredns-66bff467f8-dqmpm_kube-system\" network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.88.1.176 -j CNI-3310bdd72e0133ba9d2ae78a -m comment --comment name: \"crio-bridge\" id: \"dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-3310bdd72e0133ba9d2ae78a':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n]" * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:41.722421 2555 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-dqmpm_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:41.797338 2555 kuberuntime_manager.go:937] PodSandboxStatus of sandbox "d23bc96b5dfb6d1ea27ea528d10f59e23af7243d677e7ed8607d54eeab0772ba" for pod 
"coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)" error: rpc error: code = Unknown desc = Error: No such container: d23bc96b5dfb6d1ea27ea528d10f59e23af7243d677e7ed8607d54eeab0772ba * Jul 24 22:31:41 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:41.797436 2555 pod_workers.go:191] Error syncing pod 742bcecd-250a-48d0-bb34-f541a7635647 ("coredns-66bff467f8-dqmpm_kube-system(742bcecd-250a-48d0-bb34-f541a7635647)"), skipping: rpc error: code = Unknown desc = Error: No such container: d23bc96b5dfb6d1ea27ea528d10f59e23af7243d677e7ed8607d54eeab0772ba * Jul 24 22:31:42 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:42.804144 2555 docker_sandbox.go:400] failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod "coredns-66bff467f8-dqmpm_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" * Jul 24 22:31:42 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:42.804839 2555 pod_container_deletor.go:77] Container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" not found in pod's containers * Jul 24 22:31:42 kindnet-20200724220311-14997 kubelet[2555]: W0724 22:31:42.806873 2555 cni.go:331] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "dae8e65e361be0ca8b0abd25c2dca07b52c882d5fca71bd85bc3ecb0c27e4f7c" * Jul 24 22:31:43 kindnet-20200724220311-14997 kubelet[2555]: I0724 22:31:43.137650 2555 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 14812958b0774b373fd86b7ff8926419c523179c7de133558416a39a6ebcc849 * Jul 24 22:31:43 kindnet-20200724220311-14997 kubelet[2555]: E0724 22:31:43.138037 2555 pod_workers.go:191] Error syncing pod a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb ("kindnet-dzph2_kube-system(a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-dzph2_kube-system(a8f17cd0-5e62-4f1e-8ac2-33e6d84c8dbb)" * * ==> storage-provisioner [1b518c536e9d] <== * F0724 22:27:34.636561 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p kindnet-20200724220311-14997 -n kindnet-20200724220311-14997 helpers_test.go:254: (dbg) Run: kubectl --context kindnet-20200724220311-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: coredns-66bff467f8-dqmpm helpers_test.go:262: ======> post-mortem[TestNetworkPlugins/group/kindnet]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context kindnet-20200724220311-14997 describe pod coredns-66bff467f8-dqmpm helpers_test.go:265: (dbg) Non-zero exit: kubectl --context kindnet-20200724220311-14997 describe pod coredns-66bff467f8-dqmpm: exit status 1 (80.194791ms) ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-dqmpm" not found ** /stderr ** helpers_test.go:267: kubectl --context kindnet-20200724220311-14997 describe pod coredns-66bff467f8-dqmpm: exit status 1 helpers_test.go:170: Cleaning up "kindnet-20200724220311-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p kindnet-20200724220311-14997
helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p kindnet-20200724220311-14997: (4.880877023s)
--- FAIL: TestNetworkPlugins (1901.53s)
    --- FAIL: TestNetworkPlugins/group (0.00s)
        --- SKIP: TestNetworkPlugins/group/flannel (0.00s)
        --- PASS: TestNetworkPlugins/group/false (182.10s)
            --- PASS: TestNetworkPlugins/group/false/Start (140.75s)
            --- PASS: TestNetworkPlugins/group/false/KubeletFlags (0.36s)
            --- PASS: TestNetworkPlugins/group/false/NetCatPod (23.34s)
            --- PASS: TestNetworkPlugins/group/false/DNS (0.27s)
            --- PASS: TestNetworkPlugins/group/false/Localhost (0.27s)
            --- PASS: TestNetworkPlugins/group/false/HairPin (5.28s)
        --- PASS: TestNetworkPlugins/group/cilium (254.37s)
            --- PASS: TestNetworkPlugins/group/cilium/Start (228.71s)
            --- PASS: TestNetworkPlugins/group/cilium/ControllerPod (5.03s)
            --- PASS: TestNetworkPlugins/group/cilium/KubeletFlags (0.36s)
            --- PASS: TestNetworkPlugins/group/cilium/NetCatPod (13.52s)
            --- PASS: TestNetworkPlugins/group/cilium/DNS (0.25s)
            --- PASS: TestNetworkPlugins/group/cilium/Localhost (0.24s)
            --- PASS: TestNetworkPlugins/group/cilium/HairPin (0.23s)
        --- FAIL: TestNetworkPlugins/group/auto (537.26s)
            --- PASS: TestNetworkPlugins/group/auto/Start (125.01s)
            --- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.34s)
            --- PASS: TestNetworkPlugins/group/auto/NetCatPod (34.12s)
            --- FAIL: TestNetworkPlugins/group/auto/DNS (369.97s)
        --- FAIL: TestNetworkPlugins/group/enable-default-cni (580.45s)
            --- PASS: TestNetworkPlugins/group/enable-default-cni/Start (173.00s)
            --- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.41s)
            --- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (13.93s)
            --- FAIL: TestNetworkPlugins/group/enable-default-cni/DNS (370.60s)
        --- FAIL: TestNetworkPlugins/group/kubenet (563.16s)
            --- PASS: TestNetworkPlugins/group/kubenet/Start (147.33s)
            --- PASS: TestNetworkPlugins/group/kubenet/KubeletFlags (0.38s)
            --- PASS: TestNetworkPlugins/group/kubenet/NetCatPod (11.95s)
            --- FAIL: TestNetworkPlugins/group/kubenet/DNS (393.76s)
        --- FAIL: TestNetworkPlugins/group/bridge (575.20s)
            --- PASS: TestNetworkPlugins/group/bridge/Start (92.78s)
            --- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.51s)
            --- PASS: TestNetworkPlugins/group/bridge/NetCatPod (12.35s)
            --- FAIL: TestNetworkPlugins/group/bridge/DNS (460.12s)
        --- FAIL: TestNetworkPlugins/group/calico (1716.28s)
            --- FAIL: TestNetworkPlugins/group/calico/Start (1703.91s)
        --- FAIL: TestNetworkPlugins/group/custom-weave (1720.65s)
            --- FAIL: TestNetworkPlugins/group/custom-weave/Start (1711.46s)
        --- FAIL: TestNetworkPlugins/group/kindnet (1717.74s)
            --- FAIL: TestNetworkPlugins/group/kindnet/Start (1709.20s)
=== CONT TestStartStop/group/crio/serial/SecondStart
start_stop_delete_test.go:190: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p crio-20200724220901-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=crio --disable-driver-mounts --extra-config=kubeadm.ignore-preflight-errors=SystemVerification --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.15.7: exit status 70 (7m30.252756647s)
-- stdout --
* [crio-20200724220901-14997] minikube v1.12.1 on Ubuntu 20.04
  - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
  - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome
* Kubernetes 1.18.3 is now available.
If you would like to upgrade, specify: --kubernetes-version=v1.18.3 * Using the docker driver based on existing profile * Starting control plane node crio-20200724220901-14997 in cluster crio-20200724220901-14997 * Pulling base image ... * Restarting existing docker container for "crio-20200724220901-14997" ... * Preparing Kubernetes v1.15.7 on CRI-O 1.17.3 ... - kubeadm.ignore-preflight-errors=SystemVerification * Configuring CNI (Container Networking Interface) ... * Verifying Kubernetes components... * Enabled addons: dashboard, default-storageclass, storage-provisioner -- /stdout -- ** stderr ** I0724 22:26:01.363150 665623 out.go:188] Setting JSON to false I0724 22:26:01.366790 665623 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":3200,"bootTime":1595626361,"procs":836,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"} I0724 22:26:01.368178 665623 start.go:111] virtualization: kvm host I0724 22:26:01.386548 665623 notify.go:125] Checking for updates... I0724 22:26:01.402021 665623 driver.go:287] Setting default libvirt URI to qemu:///system I0724 22:26:01.461020 665623 docker.go:87] docker version: linux-19.03.8 I0724 22:26:01.473665 665623 start.go:217] selected driver: docker I0724 22:26:01.473675 665623 start.go:623] validating driver "docker" against &{Name:crio-20200724220901-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.2 Port:8443 KubernetesVersion:v1.15.7 ControlPlane:true Worker:true}] Addons:map[dashboard:true] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:26:01.473796 665623 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error: Fix: Doc:} I0724 22:26:01.473876 665623 cli_runner.go:109] Run: docker system info --format "{{json .}}" I0724 22:26:01.538058 665623 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true] I0724 22:26:01.538205 665623 start_flags.go:345] config: {Name:crio-20200724220901-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] 
RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.2 Port:8443 KubernetesVersion:v1.15.7 ControlPlane:true Worker:true}] Addons:map[dashboard:true] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]}
I0724 22:26:01.622353 665623 cache.go:117] Beginning downloading kic base image for docker with crio
I0724 22:26:01.639790 665623 preload.go:97] Checking if preload exists for k8s version v1.15.7 and runtime crio
I0724 22:26:01.639915 665623 cache.go:137] Downloading local/kicbase:-snapshot to local daemon
I0724 22:26:01.639935 665623 image.go:140] Writing local/kicbase:-snapshot to local daemon
W0724 22:26:01.687304 665623 preload.go:118] https://storage.googleapis.com/minikube-preloaded-volume-tarballs/preloaded-images-k8s-v4-v1.15.7-cri-o-overlay-amd64.tar.lz4 status code: 404
I0724 22:26:01.688150 665623 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/config.json ...
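The cache.go lines that follow take a per-image lock, check whether each image tarball already exists on disk, and skip the download on a hit (each check completes in a few hundred microseconds). A sketch of that stat-then-skip pattern; the cache root and directory layout here are simplifications for illustration, not minikube's actual code:
-- sketch --
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// Maps an image ref to a tarball path the way the cached paths in the log
// look (":" becomes "_"), then stats it; a hit means no download is needed.
func cachePath(root, image string) string {
	return filepath.Join(root, "cache", "images", strings.ReplaceAll(image, ":", "_"))
}

func main() {
	root := os.Getenv("MINIKUBE_HOME") // assumed cache root for this sketch
	for _, img := range []string{"k8s.gcr.io/etcd:3.3.10", "k8s.gcr.io/pause:3.1"} {
		if _, err := os.Stat(cachePath(root, img)); err == nil {
			fmt.Println("cache hit, skipping download:", img)
		} else {
			fmt.Println("not cached, would download:", img)
		}
	}
}
-- /sketch --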
I0724 22:26:01.688945 665623 cache.go:92] acquiring lock: {Name:mkbf11915380a29453ebb2928e02583f08e9fbef Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689152 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 exists I0724 22:26:01.689155 665623 cache.go:92] acquiring lock: {Name:mk7a059f3306f99b39e3faab02f4e85d5d81b09a Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689174 665623 cache.go:81] cache image "kubernetesui/metrics-scraper:v1.0.4" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4" took 249.618µs I0724 22:26:01.689203 665623 cache.go:66] save to tar file kubernetesui/metrics-scraper:v1.0.4 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/metrics-scraper_v1.0.4 succeeded I0724 22:26:01.689227 665623 cache.go:92] acquiring lock: {Name:mk32ca5b3e79b7307f47e9423a681719da980baf Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689247 665623 cache.go:92] acquiring lock: {Name:mkb173bfd0adb5c32495c7e2d9cb127d135dcb56 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689242 665623 cache.go:92] acquiring lock: {Name:mka9e31604aefd879ba790960409de49ba1db0d2 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689260 665623 cache.go:92] acquiring lock: {Name:mk255e673dbb60dbee8eadf0518a0d0ffba2c00a Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689282 665623 cache.go:92] acquiring lock: {Name:mk0e47a450df0c06d577780e58819b1c04eb4bab Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689285 665623 cache.go:92] acquiring lock: {Name:mk23b568bde816fbc3b7ffe51df75f69fbef3bc3 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689304 665623 cache.go:92] acquiring lock: {Name:mk485a5c0ed60ac206a50c6264fd0a21b101b196 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689340 665623 cache.go:92] acquiring lock: {Name:mk8dbb77fe3f422f0c49d4018c75dc085d55f33f Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0724 22:26:01.689385 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 exists I0724 22:26:01.689342 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 exists I0724 22:26:01.689408 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 exists I0724 22:26:01.689406 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 exists I0724 22:26:01.689408 665623 cache.go:81] cache image "k8s.gcr.io/etcd:3.3.10" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10" took 264.618µs I0724 22:26:01.689412 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 exists I0724 22:26:01.689425 665623 cache.go:66] save to tar file k8s.gcr.io/etcd:3.3.10 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 succeeded I0724 22:26:01.689429 665623 cache.go:100] 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 exists I0724 22:26:01.689426 665623 cache.go:81] cache image "k8s.gcr.io/pause:3.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1" took 303.321µs I0724 22:26:01.689420 665623 cache.go:81] cache image "k8s.gcr.io/kube-controller-manager:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7" took 282.62µs I0724 22:26:01.689433 665623 cache.go:81] cache image "k8s.gcr.io/kube-scheduler:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7" took 220.516µs I0724 22:26:01.689443 665623 cache.go:66] save to tar file k8s.gcr.io/pause:3.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 succeeded I0724 22:26:01.689437 665623 cache.go:81] cache image "k8s.gcr.io/coredns:1.3.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1" took 297.821µs I0724 22:26:01.689448 665623 cache.go:66] save to tar file k8s.gcr.io/kube-controller-manager:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 succeeded I0724 22:26:01.689451 665623 cache.go:81] cache image "k8s.gcr.io/kube-proxy:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7" took 288.02µs I0724 22:26:01.689462 665623 cache.go:66] save to tar file k8s.gcr.io/kube-scheduler:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 succeeded I0724 22:26:01.689469 665623 cache.go:66] save to tar file k8s.gcr.io/coredns:1.3.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/coredns_1.3.1 succeeded I0724 22:26:01.689471 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 exists I0724 22:26:01.689480 665623 cache.go:66] save to tar file k8s.gcr.io/kube-proxy:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 succeeded I0724 22:26:01.689485 665623 cache.go:100] /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 exists I0724 22:26:01.689497 665623 cache.go:81] cache image "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1" took 218.415µs I0724 22:26:01.689517 665623 cache.go:81] cache image "k8s.gcr.io/kube-apiserver:v1.15.7" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7" took 345.824µs I0724 22:26:01.689539 665623 cache.go:66] save to tar file k8s.gcr.io/kube-apiserver:v1.15.7 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 succeeded I0724 22:26:01.689545 665623 cache.go:100] 
/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 exists I0724 22:26:01.689547 665623 cache.go:66] save to tar file gcr.io/k8s-minikube/storage-provisioner:v1.8.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 succeeded I0724 22:26:01.689558 665623 cache.go:81] cache image "kubernetesui/dashboard:v2.0.1" -> "/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1" took 277.22µs I0724 22:26:01.689909 665623 cache.go:66] save to tar file kubernetesui/dashboard:v2.0.1 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/kubernetesui/dashboard_v2.0.1 succeeded I0724 22:26:01.689925 665623 cache.go:73] Successfully saved all images to host disk. I0724 22:26:02.414681 665623 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found I0724 22:26:02.414738 665623 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:26:02.414743 665623 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon I0724 22:26:06.729141 665623 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 ! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image I0724 22:26:06.729212 665623 cache.go:178] Successfully downloaded all kic artifacts I0724 22:26:06.729240 665623 start.go:241] acquiring machines lock for crio-20200724220901-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:} I0724 22:26:06.729338 665623 start.go:245] acquired machines lock for "crio-20200724220901-14997" in 75.805µs I0724 22:26:06.729366 665623 start.go:89] Skipping create...Using existing machine configuration I0724 22:26:06.729376 665623 fix.go:53] fixHost starting: I0724 22:26:06.729745 665623 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:26:06.782344 665623 fix.go:105] recreateIfNeeded on crio-20200724220901-14997: state=Stopped err= W0724 22:26:06.782389 665623 fix.go:131] unexpected machine state, will restart: I0724 22:26:06.849425 665623 cli_runner.go:109] Run: docker start crio-20200724220901-14997 I0724 22:26:07.384743 665623 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}} I0724 22:26:07.437912 665623 kic.go:330] container "crio-20200724220901-14997" state is running. I0724 22:26:07.438294 665623 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio-20200724220901-14997 I0724 22:26:07.496570 665623 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/config.json ... I0724 22:26:07.496750 665623 machine.go:88] provisioning docker machine ... 
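In the provisioning steps below, the first SSH dial into the restarted container fails with "connection reset by peer" because sshd is still coming up, and the command succeeds about three seconds later. A minimal retry sketch (address and timings are placeholders; illustrative only):
-- sketch --
package main

import (
	"fmt"
	"net"
	"time"
)

// Retries a TCP dial until the endpoint accepts, mirroring how the
// provisioning step below recovers from the transient handshake failure.
func dialWithRetry(addr string, attempts int, wait time.Duration) (net.Conn, error) {
	var err error
	for i := 0; i < attempts; i++ {
		var c net.Conn
		if c, err = net.DialTimeout("tcp", addr, 2*time.Second); err == nil {
			return c, nil
		}
		time.Sleep(wait)
	}
	return nil, fmt.Errorf("no connection after %d attempts: %w", attempts, err)
}

func main() {
	c, err := dialWithRetry("127.0.0.1:32916", 5, time.Second)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()
	fmt.Println("connected to", c.RemoteAddr())
}
-- /sketch --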
I0724 22:26:07.496770 665623 ubuntu.go:166] provisioning hostname "crio-20200724220901-14997"
I0724 22:26:07.496817 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:07.549609 665623 main.go:115] libmachine: Using SSH client type: native
I0724 22:26:07.549794 665623 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32916 }
I0724 22:26:07.549815 665623 main.go:115] libmachine: About to run SSH command:
sudo hostname crio-20200724220901-14997 && echo "crio-20200724220901-14997" | sudo tee /etc/hostname
I0724 22:26:07.550336 665623 main.go:115] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:53720->127.0.0.1:32916: read: connection reset by peer
I0724 22:26:10.684690 665623 main.go:115] libmachine: SSH cmd err, output: : crio-20200724220901-14997
I0724 22:26:10.684765 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:10.739446 665623 main.go:115] libmachine: Using SSH client type: native
I0724 22:26:10.739618 665623 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32916 }
I0724 22:26:10.739644 665623 main.go:115] libmachine: About to run SSH command:
if ! grep -xq '.*\scrio-20200724220901-14997' /etc/hosts; then
	if grep -xq '127.0.1.1\s.*' /etc/hosts; then
		sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 crio-20200724220901-14997/g' /etc/hosts;
	else
		echo '127.0.1.1 crio-20200724220901-14997' | sudo tee -a /etc/hosts;
	fi
fi
I0724 22:26:10.865695 665623 main.go:115] libmachine: SSH cmd err, output: :
I0724 22:26:10.865725 665623 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube}
I0724 22:26:10.865745 665623 ubuntu.go:174] setting up certificates
I0724 22:26:10.865753 665623 provision.go:82] configureAuth start
I0724 22:26:10.865809 665623 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio-20200724220901-14997
I0724 22:26:10.920229 665623 provision.go:131] copyHostCerts
I0724 22:26:10.920290 665623 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ...
I0724 22:26:10.920370 665623 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes)
I0724 22:26:10.920448 665623 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ...
I0724 22:26:10.920483 665623 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes)
I0724 22:26:10.920547 665623 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ...
I0724 22:26:10.920575 665623 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes)
I0724 22:26:10.920614 665623 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.crio-20200724220901-14997 san=[172.17.0.2 localhost 127.0.0.1]
I0724 22:26:11.124552 665623 provision.go:159] copyRemoteCerts
I0724 22:26:11.124640 665623 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0724 22:26:11.124701 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:11.176122 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:26:11.266066 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes)
I0724 22:26:11.287986 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1143 bytes)
I0724 22:26:11.309089 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0724 22:26:11.330309 665623 provision.go:85] duration metric: configureAuth took 464.540428ms
I0724 22:26:11.330330 665623 ubuntu.go:190] setting minikube options for container-runtime
I0724 22:26:11.330574 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:11.386657 665623 main.go:115] libmachine: Using SSH client type: native
I0724 22:26:11.386946 665623 main.go:115] libmachine: &{{{<nil> 0 [] [] []} docker [0x7b99a0] 0x7b9970 <nil> [] 0s} 127.0.0.1 32916 <nil> <nil>}
I0724 22:26:11.386970 665623 main.go:115] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %s "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube
I0724 22:26:11.517167 665623 main.go:115] libmachine: SSH cmd err, output: <nil>:
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
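[Editor's note] provision.go:105 above regenerates the machine's Docker server certificate with SANs [172.17.0.2 localhost 127.0.0.1], signed by minikube's CA, before scp-ing it into /etc/docker. The shape of that operation as a stdlib-only sketch; function and file names are illustrative, not minikube's provision code:

    package provision // sketch, assuming an already-loaded CA cert and key

    import (
    	"crypto/rand"
    	"crypto/rsa"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"encoding/pem"
    	"math/big"
    	"net"
    	"os"
    	"time"
    )

    // signServerCert mirrors "generating server cert ... san=[...]": a leaf
    // certificate for the node, signed by the existing minikube CA.
    func signServerCert(ca *x509.Certificate, caKey *rsa.PrivateKey) error {
    	key, err := rsa.GenerateKey(rand.Reader, 2048)
    	if err != nil {
    		return err
    	}
    	tmpl := &x509.Certificate{
    		SerialNumber: big.NewInt(time.Now().UnixNano()),
    		Subject:      pkix.Name{Organization: []string{"jenkins.crio-20200724220901-14997"}},
    		NotBefore:    time.Now().Add(-time.Hour),
    		NotAfter:     time.Now().AddDate(10, 0, 0),
    		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
    		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    		// the SANs printed in the log: node IP, loopback, and a hostname
    		IPAddresses: []net.IP{net.ParseIP("172.17.0.2"), net.ParseIP("127.0.0.1")},
    		DNSNames:    []string{"localhost"},
    	}
    	der, err := x509.CreateCertificate(rand.Reader, tmpl, ca, &key.PublicKey, caKey)
    	if err != nil {
    		return err
    	}
    	return os.WriteFile("server.pem",
    		pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), 0644)
    }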
I0724 22:26:11.517198 665623 machine.go:91] provisioned docker machine in 4.020432186s
I0724 22:26:11.517210 665623 start.go:204] post-start starting for "crio-20200724220901-14997" (driver="docker")
I0724 22:26:11.517218 665623 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0724 22:26:11.517296 665623 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0724 22:26:11.517346 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:11.576865 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:26:11.666553 665623 ssh_runner.go:148] Run: cat /etc/os-release
I0724 22:26:11.669716 665623 main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0724 22:26:11.669739 665623 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0724 22:26:11.669749 665623 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0724 22:26:11.669755 665623 info.go:98] Remote host: Ubuntu 19.10
I0724 22:26:11.669764 665623 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets ...
I0724 22:26:11.669812 665623 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ...
I0724 22:26:11.669908 665623 start.go:207] post-start completed in 152.690325ms
I0724 22:26:11.669921 665623 fix.go:55] fixHost completed within 4.940546417s
I0724 22:26:11.669926 665623 start.go:76] releasing machines lock for "crio-20200724220901-14997", held for 4.94057342s
I0724 22:26:11.669986 665623 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio-20200724220901-14997
I0724 22:26:11.721331 665623 ssh_runner.go:148] Run: systemctl --version
I0724 22:26:11.721395 665623 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/
I0724 22:26:11.721401 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:11.721465 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:26:11.773577 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:26:11.776143 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:26:11.857266 665623 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd
I0724 22:26:11.931558 665623 ssh_runner.go:148] Run: sudo systemctl stop -f containerd
I0724 22:26:11.953423 665623 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service containerd
I0724 22:26:11.964829 665623 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service docker
I0724 22:26:11.977726 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
image-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I0724 22:26:12.013116 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo sed -e 's|^pause_image = .*$|pause_image = "k8s.gcr.io/pause:3.1"|' -i /etc/crio/crio.conf"
I0724 22:26:12.024246 665623 ssh_runner.go:148] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0724 22:26:12.032907 665623 ssh_runner.go:148] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0724 22:26:12.042111 665623 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:26:12.141780 665623 ssh_runner.go:148] Run: sudo systemctl start crio
I0724 22:26:16.126961 665623 ssh_runner.go:188] Completed: sudo systemctl start crio: (3.985093559s)
I0724 22:26:16.127046 665623 ssh_runner.go:148] Run: crio --version
I0724 22:26:16.252641 665623 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}}
I0724 22:26:16.306471 665623 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 0).Gateway}}" d4a420189740
I0724 22:26:16.363232 665623 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1
I0724 22:26:16.363312 665623 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts
I0724 22:26:16.367512 665623 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:26:16.404240 665623 preload.go:97] Checking if preload exists for k8s version v1.15.7 and runtime crio
W0724 22:26:16.445572 665623 preload.go:118] https://storage.googleapis.com/minikube-preloaded-volume-tarballs/preloaded-images-k8s-v4-v1.15.7-cri-o-overlay-amd64.tar.lz4 status code: 404
I0724 22:26:16.445597 665623 cache_images.go:72] LoadImages start: [k8s.gcr.io/kube-proxy:v1.15.7 k8s.gcr.io/kube-scheduler:v1.15.7 k8s.gcr.io/kube-controller-manager:v1.15.7 k8s.gcr.io/kube-apiserver:v1.15.7 k8s.gcr.io/coredns:1.3.1 k8s.gcr.io/etcd:3.3.10 k8s.gcr.io/pause:3.1 gcr.io/k8s-minikube/storage-provisioner:v1.8.1 kubernetesui/dashboard:v2.0.1 kubernetesui/metrics-scraper:v1.0.4]
I0724 22:26:16.445666 665623 image.go:168] retrieving image: kubernetesui/metrics-scraper:v1.0.4
I0724 22:26:16.445674 665623 image.go:168] retrieving image: k8s.gcr.io/kube-apiserver:v1.15.7
I0724 22:26:16.445704 665623 image.go:168] retrieving image: k8s.gcr.io/pause:3.1
I0724 22:26:16.445729 665623 image.go:168] retrieving image: k8s.gcr.io/kube-controller-manager:v1.15.7
I0724 22:26:16.445713 665623 image.go:168] retrieving image: k8s.gcr.io/kube-proxy:v1.15.7
I0724 22:26:16.445840 665623 image.go:168] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v1.8.1
I0724 22:26:16.445863 665623 image.go:168] retrieving image: k8s.gcr.io/etcd:3.3.10
I0724 22:26:16.445680 665623 image.go:168] retrieving image: k8s.gcr.io/kube-scheduler:v1.15.7
I0724 22:26:16.445868 665623 image.go:168] retrieving image: kubernetesui/dashboard:v2.0.1
I0724 22:26:16.445840 665623 image.go:168] retrieving image: k8s.gcr.io/coredns:1.3.1
I0724 22:26:16.446544 665623 image.go:176] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v1.8.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446559 665623 image.go:176] daemon lookup for k8s.gcr.io/kube-proxy:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446581 665623 image.go:176] daemon lookup for kubernetesui/dashboard:v2.0.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446588 665623 image.go:176] daemon lookup for k8s.gcr.io/kube-controller-manager:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446657 665623 image.go:176] daemon lookup for k8s.gcr.io/kube-scheduler:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446671 665623 image.go:176] daemon lookup for k8s.gcr.io/pause:3.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446598 665623 image.go:176] daemon lookup for k8s.gcr.io/coredns:1.3.1: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446614 665623 image.go:176] daemon lookup for kubernetesui/metrics-scraper:v1.0.4: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446628 665623 image.go:176] daemon lookup for k8s.gcr.io/kube-apiserver:v1.15.7: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
I0724 22:26:16.446642 665623 image.go:176] daemon lookup for k8s.gcr.io/etcd:3.3.10: Error response from daemon: client version 1.41 is too new. Maximum supported API version is 1.40
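[Editor's note] Every image.go:176 daemon lookup above fails the same way: the test binary's Docker SDK speaks API 1.41 while the host daemon tops out at 1.40, so minikube falls back to resolving the images from their registries (and then finds them already cached on disk). The usual client-side fix is API version negotiation; a sketch with the Docker Go SDK (the image name is just an example):

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/docker/docker/client"
    )

    func main() {
    	// WithAPIVersionNegotiation pings the daemon and downgrades the
    	// client's API version, avoiding the 1.41-vs-1.40 error in the log.
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer cli.Close()

    	inspect, _, err := cli.ImageInspectWithRaw(context.Background(), "k8s.gcr.io/pause:3.1")
    	if err != nil {
    		log.Fatal(err) // without negotiation, this is where the version error appears
    	}
    	fmt.Println(inspect.ID)
    }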
I0724 22:26:16.621938 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} gcr.io/k8s-minikube/storage-provisioner:v1.8.1
I0724 22:26:16.641275 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/pause:3.1
I0724 22:26:16.644422 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/etcd:3.3.10
I0724 22:26:16.652163 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/coredns:1.3.1
I0724 22:26:16.671433 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-controller-manager:v1.15.7
I0724 22:26:16.684226 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-apiserver:v1.15.7
I0724 22:26:16.720811 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-proxy:v1.15.7
I0724 22:26:16.727370 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} kubernetesui/dashboard:v2.0.1
I0724 22:26:16.775966 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} kubernetesui/metrics-scraper:v1.0.4
I0724 22:26:16.792846 665623 ssh_runner.go:148] Run: sudo podman image inspect --format {{.Id}} k8s.gcr.io/kube-scheduler:v1.15.7
I0724 22:26:17.441452 665623 cache_images.go:105] "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" does not exist at hash "4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c" in container runtime
I0724 22:26:17.441492 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1
I0724 22:26:17.441616 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v1.8.1
I0724 22:26:17.443904 665623 cache_images.go:105] "k8s.gcr.io/pause:3.1" needs transfer: "k8s.gcr.io/pause:3.1" does not exist at hash "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e" in container runtime
I0724 22:26:17.443934 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1
I0724 22:26:17.444010 665623 cache_images.go:105] "k8s.gcr.io/etcd:3.3.10" needs transfer: "k8s.gcr.io/etcd:3.3.10" does not exist at hash "2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d" in container runtime
I0724 22:26:17.444041 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10
I0724 22:26:17.444054 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.1
I0724 22:26:17.444152 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.3.10
I0724 22:26:17.539426 665623 cache_images.go:105] "k8s.gcr.io/kube-controller-manager:v1.15.7" needs transfer: "k8s.gcr.io/kube-controller-manager:v1.15.7" does not exist at hash "d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2" in container runtime
I0724 22:26:17.539467 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7
I0724 22:26:17.539595 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.15.7
I0724 22:26:17.552789 665623 cache_images.go:105] "k8s.gcr.io/kube-apiserver:v1.15.7" needs transfer: "k8s.gcr.io/kube-apiserver:v1.15.7" does not exist at hash "c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264" in container runtime
I0724 22:26:17.552821 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7
I0724 22:26:17.552890 665623 cache_images.go:105] "k8s.gcr.io/kube-proxy:v1.15.7" needs transfer: "k8s.gcr.io/kube-proxy:v1.15.7" does not exist at hash "ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f" in container runtime
I0724 22:26:17.552916 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7
I0724 22:26:17.552932 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.15.7
I0724 22:26:17.553009 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.15.7
I0724 22:26:17.575248 665623 cache_images.go:105] "k8s.gcr.io/kube-scheduler:v1.15.7" needs transfer: "k8s.gcr.io/kube-scheduler:v1.15.7" does not exist at hash "78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367" in container runtime
I0724 22:26:17.575276 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/storage-provisioner_v1.8.1 (exists)
I0724 22:26:17.575282 665623 cache_images.go:241] Loading image from cache: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7
I0724 22:26:17.575288 665623 crio.go:152] Loading image: /var/lib/minikube/images/storage-provisioner_v1.8.1
I0724 22:26:17.575304 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/etcd_3.3.10 (exists)
I0724 22:26:17.575335 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/storage-provisioner_v1.8.1
I0724 22:26:17.575352 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/pause_3.1 (exists)
I0724 22:26:17.575408 665623 ssh_runner.go:148] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.15.7
I0724 22:26:17.575410 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/kube-controller-manager_v1.15.7 (exists)
I0724 22:26:17.575467 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/kube-proxy_v1.15.7 (exists)
I0724 22:26:17.575480 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/kube-apiserver_v1.15.7 (exists)
I0724 22:26:21.125672 665623 ssh_runner.go:188] Completed: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.15.7: (3.550238116s)
I0724 22:26:21.125705 665623 ssh_runner.go:209] copy: skipping /var/lib/minikube/images/kube-scheduler_v1.15.7 (exists)
I0724 22:26:21.125734 665623 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/storage-provisioner_v1.8.1: (3.550368525s)
I0724 22:26:21.125755 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 from cache
I0724 22:26:21.125780 665623 crio.go:152] Loading image: /var/lib/minikube/images/etcd_3.3.10
I0724 22:26:21.125850 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/etcd_3.3.10
I0724 22:26:30.543005 665623 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/etcd_3.3.10: (9.417130726s)
I0724 22:26:30.543044 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/etcd_3.3.10 from cache
I0724 22:26:30.543061 665623 crio.go:152] Loading image: /var/lib/minikube/images/pause_3.1
I0724 22:26:30.543150 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/pause_3.1
I0724 22:26:31.084062 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/pause_3.1 from cache
I0724 22:26:31.084141 665623 crio.go:152] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.15.7
I0724 22:26:31.084488 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-controller-manager_v1.15.7
I0724 22:26:37.536891 665623 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-controller-manager_v1.15.7: (6.452345962s)
I0724 22:26:37.536921 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-controller-manager_v1.15.7 from cache
I0724 22:26:37.536939 665623 crio.go:152] Loading image: /var/lib/minikube/images/kube-proxy_v1.15.7
I0724 22:26:37.536994 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-proxy_v1.15.7
I0724 22:26:39.926583 665623 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-proxy_v1.15.7: (2.389564555s)
I0724 22:26:39.926607 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-proxy_v1.15.7 from cache
I0724 22:26:39.926621 665623 crio.go:152] Loading image: /var/lib/minikube/images/kube-apiserver_v1.15.7
I0724 22:26:39.926666 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-apiserver_v1.15.7
I0724 22:26:47.034925 665623 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-apiserver_v1.15.7: (7.10823483s)
I0724 22:26:47.034950 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-apiserver_v1.15.7 from cache
I0724 22:26:47.034960 665623 crio.go:152] Loading image: /var/lib/minikube/images/kube-scheduler_v1.15.7
I0724 22:26:47.035007 665623 ssh_runner.go:148] Run: sudo podman load -i /var/lib/minikube/images/kube-scheduler_v1.15.7
I0724 22:26:50.349777 665623 ssh_runner.go:188] Completed: sudo podman load -i /var/lib/minikube/images/kube-scheduler_v1.15.7: (3.31474606s)
I0724 22:26:50.349800 665623 cache_images.go:263] Transferred and loaded /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/images/k8s.gcr.io/kube-scheduler_v1.15.7 from cache
I0724 22:26:50.349816 665623 cache_images.go:112] Successfully loaded all cached images
I0724 22:26:50.349822 665623 cache_images.go:76] LoadImages completed in 33.904211539s
I0724 22:26:50.349891 665623 ssh_runner.go:148] Run: crio config
I0724 22:26:50.449634 665623 cni.go:74] Creating CNI manager for ""
I0724 22:26:50.449654 665623 cni.go:105] "docker" driver + crio runtime found, recommending kindnet
I0724 22:26:50.449663 665623 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
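[Editor's note] The cache_images/crio block above follows a fixed per-image pattern: stat the tarball already on the node (the "copy: skipping … (exists)" lines), transfer it only if missing, then "sudo podman load -i …" one image at a time. Serialized roughly, with runCmd and copyToNode as hypothetical stand-ins for minikube's ssh_runner:

    package cruntime // sketch of the stat -> copy-if-missing -> podman load loop

    func loadCachedImages(runCmd func(cmd string) error, copyToNode func(img string) error, images []string) error {
    	for _, img := range images {
    		dst := "/var/lib/minikube/images/" + img
    		// "copy: skipping ... (exists)" -- only transfer when stat fails
    		if err := runCmd(`stat -c "%s %y" ` + dst); err != nil {
    			if err := copyToNode(img); err != nil {
    				return err
    			}
    		}
    		// "Loading image: ..." -- podman load runs serially per image
    		if err := runCmd("sudo podman load -i " + dst); err != nil {
    			return err
    		}
    	}
    	return nil
    }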
I0724 22:26:50.449679 665623 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.2 APIServerPort:8443 KubernetesVersion:v1.15.7 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:crio-20200724220901-14997 NodeName:crio-20200724220901-14997 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.2"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0724 22:26:50.449787 665623 kubeadm.go:154] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.17.0.2
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "crio-20200724220901-14997"
  kubeletExtraArgs:
    node-ip: 172.17.0.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "172.17.0.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: crio-20200724220901-14997
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      listen-metrics-urls: http://127.0.0.1:2381,http://172.17.0.2:2381
controllerManager:
  extraArgs:
    "leader-elect": "false"
scheduler:
  extraArgs:
    "leader-elect": "false"
kubernetesVersion: v1.15.7
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 172.17.0.2:10249
I0724 22:26:50.449863 665623 kubeadm.go:790] kubelet [Unit]
Wants=docker.socket

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.15.7/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --client-ca-file=/var/lib/minikube/certs/ca.crt --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --hostname-override=crio-20200724220901-14997 --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.2 --runtime-request-timeout=15m

[Install]
 config:
{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
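[Editor's note] The kubeadm.go:150/154 pair above shows the pattern at work: a flat options struct is rendered into the multi-document kubeadm YAML that gets pushed to the node. minikube does this with Go text templates; a much-reduced illustrative sketch, covering only a few of the fields printed above (struct and template here are not minikube's actual ones):

    package main

    import (
    	"os"
    	"text/template"
    )

    // kubeadmOpts is a trimmed-down stand-in for the options struct in the log.
    type kubeadmOpts struct {
    	AdvertiseAddress string
    	APIServerPort    int
    	ClusterName      string
    	CRISocket        string
    }

    const tmpl = `apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: {{.AdvertiseAddress}}
      bindPort: {{.APIServerPort}}
    nodeRegistration:
      criSocket: {{.CRISocket}}
      name: "{{.ClusterName}}"
    `

    func main() {
    	t := template.Must(template.New("kubeadm").Parse(tmpl))
    	opts := kubeadmOpts{"172.17.0.2", 8443, "crio-20200724220901-14997", "/var/run/crio/crio.sock"}
    	if err := t.Execute(os.Stdout, opts); err != nil {
    		panic(err)
    	}
    }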
I0724 22:26:50.449934 665623 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.15.7
I0724 22:26:50.462771 665623 binaries.go:43] Found k8s binaries, skipping transfer
I0724 22:26:50.462841 665623 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0724 22:26:50.471822 665623 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (550 bytes)
I0724 22:26:50.495735 665623 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes)
I0724 22:26:50.518227 665623 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1882 bytes)
I0724 22:26:50.541037 665623 ssh_runner.go:148] Run: grep 172.17.0.2 control-plane.minikube.internal$ /etc/hosts
I0724 22:26:50.544900 665623 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:26:50.557096 665623 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:26:50.632471 665623 ssh_runner.go:148] Run: sudo systemctl start kubelet
I0724 22:26:50.653528 665623 certs.go:52] Setting up /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997 for IP: 172.17.0.2
I0724 22:26:50.653594 665623 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key
I0724 22:26:50.653615 665623 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key
I0724 22:26:50.653757 665623 certs.go:269] skipping minikube-user signed cert generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/client.key
I0724 22:26:50.653813 665623 certs.go:269] skipping minikube signed cert generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key.7b749c5f
I0724 22:26:50.653845 665623 certs.go:269] skipping aggregator signed cert generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.key
I0724 22:26:50.653968 665623 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes)
W0724 22:26:50.654026 665623 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes
I0724 22:26:50.654042 665623 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes)
I0724 22:26:50.654156 665623 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes)
I0724 22:26:50.654228 665623 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes)
I0724 22:26:50.654281 665623 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes)
I0724 22:26:50.655291 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes)
I0724 22:26:50.680895 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0724 22:26:50.705936 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes)
I0724 22:26:50.744611 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/crio-20200724220901-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0724 22:26:50.770445 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes)
I0724 22:26:50.792912 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0724 22:26:50.846340 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes)
I0724 22:26:50.872247 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0724 22:26:50.899912 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes)
I0724 22:26:50.954783 665623 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes)
I0724 22:26:50.983075 665623 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes)
I0724 22:26:51.006636 665623 ssh_runner.go:148] Run: openssl version
I0724 22:26:51.012985 665623 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem"
I0724 22:26:51.022864 665623 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem
I0724 22:26:51.027753 665623 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem
I0724 22:26:51.027824 665623 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem
I0724 22:26:51.034066 665623 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0"
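[Editor's note] The openssl x509 -hash dance above is how the CA certs become trusted system-wide: OpenSSL looks certificates up via subject-hash symlinks (<hash>.0) in /etc/ssl/certs, hence the idempotent "test -L || ln -fs" to 51391683.0 (and b5213941.0 just below). The same hash-and-link step sketched in Go; paths come from the log, the helper name is illustrative, and it needs root in practice:

    package main

    import (
    	"log"
    	"os"
    	"os/exec"
    	"strings"
    )

    // linkBySubjectHash mirrors "openssl x509 -hash" + "test -L || ln -fs":
    // create /etc/ssl/certs/<subject-hash>.0 pointing at the PEM file.
    func linkBySubjectHash(pemPath string) error {
    	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
    	if err != nil {
    		return err
    	}
    	link := "/etc/ssl/certs/" + strings.TrimSpace(string(out)) + ".0"
    	if _, err := os.Lstat(link); err == nil {
    		return nil // symlink already present, like the "test -L" guard
    	}
    	return os.Symlink(pemPath, link)
    }

    func main() {
    	if err := linkBySubjectHash("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
    		log.Fatal(err)
    	}
    }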
/bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0" I0724 22:26:51.043429 665623 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0724 22:26:51.054383 665623 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0724 22:26:51.058647 665623 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem I0724 22:26:51.058703 665623 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0724 22:26:51.064846 665623 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0724 22:26:51.073766 665623 kubeadm.go:327] StartCluster: {Name:crio-20200724220901-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.15.7 ClusterName:crio-20200724220901-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[{Component:kubeadm Key:ignore-preflight-errors Value:SystemVerification}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.17.0.2 Port:8443 KubernetesVersion:v1.15.7 ControlPlane:true Worker:true}] Addons:map[dashboard:true] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]} I0724 22:26:51.073861 665623 cri.go:41] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]} I0724 22:26:51.073938 665623 ssh_runner.go:148] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system" I0724 22:26:51.092319 665623 cri.go:76] found id: "" I0724 22:26:51.092502 665623 ssh_runner.go:148] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd I0724 22:26:51.101031 665623 kubeadm.go:338] found existing configuration files, will attempt cluster restart I0724 22:26:51.101048 665623 kubeadm.go:512] restartCluster start I0724 22:26:51.101161 665623 ssh_runner.go:148] Run: sudo test -d /data/minikube I0724 22:26:51.109440 665623 kubeadm.go:122] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1 stdout: stderr: I0724 22:26:51.112555 665623 ssh_runner.go:148] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new I0724 22:26:51.121617 665623 api_server.go:146] Checking apiserver status ... 
I0724 22:26:51.121688 665623 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
W0724 22:26:51.132796 665623 api_server.go:150] stopped: unable to get apiserver pid: sudo pgrep -xnf kube-apiserver.*minikube.*: Process exited with status 1
stdout:

stderr:

I0724 22:26:51.132815 665623 kubeadm.go:491] needs reconfigure: apiserver in state Stopped
I0724 22:26:51.132823 665623 kubeadm.go:913] stopping kube-system containers ...
I0724 22:26:51.132833 665623 cri.go:41] listing CRI containers in root : {State:all Name: Namespaces:[kube-system]}
I0724 22:26:51.132889 665623 ssh_runner.go:148] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0724 22:26:51.150834 665623 cri.go:76] found id: ""
I0724 22:26:51.150907 665623 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0724 22:26:51.159292 665623 kubeadm.go:150] found existing configuration files:
-rw------- 1 root root 5587 Jul 24 22:10 /etc/kubernetes/admin.conf
-rw------- 1 root root 5623 Jul 24 22:10 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 5699 Jul 24 22:10 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5567 Jul 24 22:10 /etc/kubernetes/scheduler.conf

I0724 22:26:51.159349 665623 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0724 22:26:51.168441 665623 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0724 22:26:51.176520 665623 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0724 22:26:51.185963 665623 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0724 22:26:51.196753 665623 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0724 22:26:51.213107 665623 kubeadm.go:576] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
I0724 22:26:51.213128 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:26:51.254250 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:26:52.503087 665623 ssh_runner.go:188] Completed: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.248807237s)
I0724 22:26:52.503120 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:26:52.556514 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:26:52.601992 665623 api_server.go:48] waiting for apiserver process to appear ...
I0724 22:26:52.602054 665623 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:26:53.144245 665623 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:26:53.644520 665623 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:26:54.144236 665623 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:26:54.155984 665623 api_server.go:68] duration metric: took 1.553994494s to wait for apiserver process to appear ...
I0724 22:26:54.156005 665623 api_server.go:84] waiting for apiserver healthz status ...
I0724 22:26:54.156013 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:26:54.156217 665623 api_server.go:231] stopped: https://172.17.0.2:8443/healthz: Get "https://172.17.0.2:8443/healthz": dial tcp 172.17.0.2:8443: connect: connection refused
I0724 22:26:54.656457 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:27:00.017880 665623 api_server.go:241] https://172.17.0.2:8443/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W0724 22:27:00.017916 665623 api_server.go:99] status: https://172.17.0.2:8443/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I0724 22:27:00.156434 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:27:00.166767 665623 api_server.go:241] https://172.17.0.2:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[-]poststarthook/crd-informer-synced failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
W0724 22:27:00.166791 665623 api_server.go:99] status: https://172.17.0.2:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[-]poststarthook/crd-informer-synced failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0724 22:27:00.656442 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:27:00.671809 665623 api_server.go:241] https://172.17.0.2:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
W0724 22:27:00.671854 665623 api_server.go:99] status: https://172.17.0.2:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0724 22:27:01.156396 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:27:01.163211 665623 api_server.go:241] https://172.17.0.2:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
W0724 22:27:01.163235 665623 api_server.go:99] status: https://172.17.0.2:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0724 22:27:01.656423 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:27:01.661342 665623 api_server.go:241] https://172.17.0.2:8443/healthz returned 200:
ok
I0724 22:27:01.670032 665623 api_server.go:137] control plane version: v1.15.7
I0724 22:27:01.670053 665623 api_server.go:127] duration metric: took 7.514042707s to wait for apiserver health ...
I0724 22:27:01.670061 665623 cni.go:74] Creating CNI manager for ""
I0724 22:27:01.670066 665623 cni.go:105] "docker" driver + crio runtime found, recommending kindnet
I0724 22:27:01.689202 665623 ssh_runner.go:148] Run: stat /opt/cni/bin/portmap
I0724 22:27:01.695804 665623 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.15.7/kubectl ...
I0724 22:27:01.695823 665623 ssh_runner.go:215] scp memory --> /var/tmp/minikube/cni.yaml (2285 bytes)
I0724 22:27:01.722355 665623 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.15.7/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0724 22:27:02.583355 665623 crio.go:331] Updating CRIO to use CIDR: "10.244.0.0/16"
I0724 22:27:02.583434 665623 ssh_runner.go:148] Run: sudo /bin/bash -c "sed -i -e s#10.88.0.0/16#10.244.0.0/16# -e s#10.88.0.1#10.244.0.1# /etc/cni/net.d/*bridge*"
I0724 22:27:02.595628 665623 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:27:02.855045 665623 ssh_runner.go:148] Run: sudo systemctl restart crio
I0724 22:27:05.677966 665623 ssh_runner.go:188] Completed: sudo systemctl restart crio: (2.822890027s)
I0724 22:27:05.677997 665623 system_pods.go:43] waiting for kube-system pods to appear ...
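[Editor's note] The healthz exchange above is a plain HTTPS poll: connection refused while the apiserver binds, 403 for the anonymous user, 500 with per-hook "[-] … failed" lines while bootstrap post-start hooks finish, then 200 "ok". A sketch of such a poll loop; the apiserver's serving cert isn't trusted by the test client (hence InsecureSkipVerify), and the interval and deadline here are illustrative:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"
    )

    func waitForHealthz(url string, timeout time.Duration) error {
    	client := &http.Client{
    		Timeout: 5 * time.Second,
    		Transport: &http.Transport{
    			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // self-signed apiserver cert
    		},
    	}
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		resp, err := client.Get(url)
    		if err == nil {
    			resp.Body.Close()
    			if resp.StatusCode == http.StatusOK { // the "returned 200: ok" line
    				return nil
    			}
    			// 403/500 while RBAC bootstrap and post-start hooks settle
    		}
    		time.Sleep(500 * time.Millisecond) // roughly the ~500ms cadence in the log
    	}
    	return fmt.Errorf("%s not healthy after %v", url, timeout)
    }

    func main() {
    	fmt.Println(waitForHealthz("https://172.17.0.2:8443/healthz", time.Minute))
    }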
I0724 22:27:05.690136 665623 system_pods.go:59] 8 kube-system pods found
I0724 22:27:05.690175 665623 system_pods.go:61] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:05.690185 665623 system_pods.go:61] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:05.690198 665623 system_pods.go:61] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:05.690215 665623 system_pods.go:61] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:05.690226 665623 system_pods.go:61] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:05.690234 665623 system_pods.go:61] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:05.690251 665623 system_pods.go:61] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:05.690260 665623 system_pods.go:61] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running
I0724 22:27:05.690272 665623 system_pods.go:74] duration metric: took 12.267559ms to wait for pod list to return data ...
I0724 22:27:05.690282 665623 node_conditions.go:101] verifying NodePressure condition ...
I0724 22:27:05.693569 665623 node_conditions.go:121] node storage ephemeral capacity is 128884272Ki
I0724 22:27:05.693599 665623 node_conditions.go:122] node cpu capacity is 16
I0724 22:27:05.693612 665623 node_conditions.go:104] duration metric: took 3.316432ms to run NodePressure ...
I0724 22:27:05.693630 665623 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.15.7:$PATH kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:27:05.933107 665623 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0724 22:27:05.941262 665623 ops.go:35] apiserver oom_adj: 16
I0724 22:27:05.941278 665623 ops.go:40] adjusting apiserver oom_adj to -10
I0724 22:27:05.941287 665623 ssh_runner.go:148] Run: /bin/bash -c "echo -10 | sudo tee /proc/$(pgrep kube-apiserver)/oom_adj"
I0724 22:27:05.956503 665623 kubeadm.go:516] restartCluster took 14.855445594s
I0724 22:27:05.956528 665623 kubeadm.go:329] StartCluster complete in 14.882769896s
I0724 22:27:05.956577 665623 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0724 22:27:05.956691 665623 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
I0724 22:27:05.958078 665623 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0724 22:27:05.958318 665623 start.go:195] Will wait wait-timeout for node ...
I0724 22:27:05.970440 665623 api_server.go:48] waiting for apiserver process to appear ...
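[Editor's note] ops.go:35/40 above read the apiserver's /proc/<pid>/oom_adj (16) and lower it to -10 so the kernel's OOM killer prefers to kill other processes first. The /proc round-trip sketched in Go; adjustOOM is an illustrative name, pid discovery (the pgrep in the log) is elided, and lowering the value needs root, just like the sudo tee above:

    package main

    import (
    	"fmt"
    	"log"
    	"os"
    	"strings"
    )

    // adjustOOM lowers a process's legacy OOM score adjustment via /proc.
    // (Modern kernels also expose oom_score_adj; the log uses oom_adj.)
    func adjustOOM(pid int, value int) error {
    	path := fmt.Sprintf("/proc/%d/oom_adj", pid)
    	cur, err := os.ReadFile(path)
    	if err != nil {
    		return err
    	}
    	log.Printf("apiserver oom_adj: %s", strings.TrimSpace(string(cur)))
    	log.Printf("adjusting apiserver oom_adj to %d", value)
    	return os.WriteFile(path, []byte(fmt.Sprintf("%d\n", value)), 0644)
    }

    func main() {
    	if err := adjustOOM(os.Getpid(), -10); err != nil { // demo on own pid
    		log.Fatal(err)
    	}
    }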
I0724 22:27:05.958355 665623 addons.go:353] enableAddons start: toEnable=map[dashboard:true], additional=[]
I0724 22:27:05.970523 665623 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:27:05.958444 665623 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl scale deployment --replicas=1 coredns -n=kube-system
I0724 22:27:05.970609 665623 addons.go:53] Setting dashboard=true in profile "crio-20200724220901-14997"
I0724 22:27:05.970640 665623 addons.go:129] Setting addon dashboard=true in "crio-20200724220901-14997"
I0724 22:27:05.970672 665623 addons.go:53] Setting default-storageclass=true in profile "crio-20200724220901-14997"
W0724 22:27:05.970690 665623 addons.go:138] addon dashboard should already be in state true
I0724 22:27:05.970696 665623 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "crio-20200724220901-14997"
I0724 22:27:05.970706 665623 host.go:65] Checking if "crio-20200724220901-14997" exists ...
I0724 22:27:05.970587 665623 addons.go:53] Setting storage-provisioner=true in profile "crio-20200724220901-14997"
I0724 22:27:05.970831 665623 addons.go:129] Setting addon storage-provisioner=true in "crio-20200724220901-14997"
W0724 22:27:05.970844 665623 addons.go:138] addon storage-provisioner should already be in state true
I0724 22:27:05.970860 665623 host.go:65] Checking if "crio-20200724220901-14997" exists ...
I0724 22:27:05.971061 665623 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}}
I0724 22:27:05.971271 665623 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}}
I0724 22:27:05.971532 665623 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}}
I0724 22:27:06.031593 665623 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0724 22:27:06.031625 665623 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes)
I0724 22:27:06.031699 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:27:06.032930 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-ns.yaml
I0724 22:27:06.032957 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I0724 22:27:06.033018 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:27:06.046515 665623 addons.go:129] Setting addon default-storageclass=true in "crio-20200724220901-14997"
W0724 22:27:06.046537 665623 addons.go:138] addon default-storageclass should already be in state true
I0724 22:27:06.046550 665623 host.go:65] Checking if "crio-20200724220901-14997" exists ...
I0724 22:27:06.046945 665623 cli_runner.go:109] Run: docker container inspect crio-20200724220901-14997 --format={{.State.Status}}
I0724 22:27:06.079125 665623 start.go:549] successfully scaled coredns replicas to 1
I0724 22:27:06.079201 665623 api_server.go:68] duration metric: took 120.84906ms to wait for apiserver process to appear ...
I0724 22:27:06.079227 665623 api_server.go:84] waiting for apiserver healthz status ...
I0724 22:27:06.079237 665623 api_server.go:221] Checking apiserver healthz at https://172.17.0.2:8443/healthz ...
I0724 22:27:06.085107 665623 api_server.go:241] https://172.17.0.2:8443/healthz returned 200:
ok
I0724 22:27:06.085871 665623 api_server.go:137] control plane version: v1.15.7
I0724 22:27:06.085894 665623 api_server.go:127] duration metric: took 6.660566ms to wait for apiserver health ...
I0724 22:27:06.085906 665623 system_pods.go:43] waiting for kube-system pods to appear ...
I0724 22:27:06.090867 665623 system_pods.go:59] 8 kube-system pods found
I0724 22:27:06.090897 665623 system_pods.go:61] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:06.090908 665623 system_pods.go:61] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:06.090925 665623 system_pods.go:61] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:06.090931 665623 system_pods.go:61] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:06.090938 665623 system_pods.go:61] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:06.090954 665623 system_pods.go:61] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:06.090962 665623 system_pods.go:61] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:06.090969 665623 system_pods.go:61] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:06.090975 665623 system_pods.go:74] duration metric: took 5.062855ms to wait for pod list to return data ...
I0724 22:27:06.090994 665623 default_sa.go:33] waiting for default service account to be created ...
I0724 22:27:06.093908 665623 default_sa.go:44] found service account: "default"
I0724 22:27:06.093930 665623 default_sa.go:54] duration metric: took 2.929406ms for default service account to be created ...
I0724 22:27:06.093938 665623 system_pods.go:116] waiting for k8s-apps to be running ...
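[Editor's note] The system_pods wait that follows (and its "will retry … missing components: kube-dns" lines below) is a list-and-retry loop against the apiserver: list kube-system pods, retry while the DNS pod is still Pending. A client-go sketch of that loop under stated assumptions (waitForKubeDNS is an illustrative name; minikube's real check lives in its kverify/system_pods code, and the retry cadence here is approximate):

    package kverify // sketch

    import (
    	"context"
    	"fmt"
    	"strings"
    	"time"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func waitForKubeDNS(kubeconfig string) error {
    	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    	if err != nil {
    		return err
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return err
    	}
    	for i := 0; i < 60; i++ {
    		pods, err := cs.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
    		if err != nil {
    			return err
    		}
    		for _, p := range pods.Items {
    			if strings.HasPrefix(p.Name, "coredns") && p.Status.Phase == corev1.PodRunning {
    				return nil // kube-dns is up; "waiting for k8s-apps" succeeds
    			}
    		}
    		time.Sleep(time.Second) // the log backs off a few hundred ms per retry
    	}
    	return fmt.Errorf("missing components: kube-dns")
    }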
I0724 22:27:06.096450 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:27:06.099581 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:27:06.102253 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:06.102280 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:06.102286 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:06.102295 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:06.102301 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:06.102308 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:06.102314 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:06.102319 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:06.102326 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:06.102352 665623 retry.go:30] will retry after 263.082536ms: missing components: kube-dns
I0724 22:27:06.111033 665623 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml
I0724 22:27:06.111057 665623 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0724 22:27:06.111132 665623 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio-20200724220901-14997
I0724 22:27:06.169554 665623 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32916 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/crio-20200724220901-14997/id_rsa Username:docker}
I0724 22:27:06.207447 665623 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0724 22:27:06.208942 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I0724 22:27:06.209000 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I0724 22:27:06.231313 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I0724 22:27:06.231334 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I0724 22:27:06.258972 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I0724 22:27:06.259002 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I0724 22:27:06.281402 665623 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0724 22:27:06.284050 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-dp.yaml
I0724 22:27:06.284068 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-dp.yaml --> /etc/kubernetes/addons/dashboard-dp.yaml (4097 bytes)
I0724 22:27:06.351521 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-role.yaml
I0724 22:27:06.351544 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I0724 22:27:06.373639 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I0724 22:27:06.373660 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I0724 22:27:06.380129 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:06.380165 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:06.380175 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:06.380186 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:06.380194 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:06.380201 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:06.380206 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:06.380211 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:06.380218 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:06.380228 665623 retry.go:30] will retry after 381.329545ms: missing components: kube-dns
I0724 22:27:06.445281 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-sa.yaml
I0724 22:27:06.445302 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I0724 22:27:06.472497 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-secret.yaml
I0724 22:27:06.472524 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1401 bytes)
I0724 22:27:06.495391 665623 addons.go:236] installing /etc/kubernetes/addons/dashboard-svc.yaml
I0724 22:27:06.495412 665623 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I0724 22:27:06.518659 665623 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.15.7/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0724 22:27:06.784049 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:06.784089 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:06.784096 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:06.784105 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:06.784111 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:06.784117 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:06.784122 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:06.784128 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:06.784135 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:06.784158 665623 retry.go:30] will retry after 422.765636ms: missing components: kube-dns
I0724 22:27:07.078031 665623 addons.go:355] enableAddons completed in 1.119677487s
I0724 22:27:07.211669 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:07.211719 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:07.211730 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:07.211743 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:07.211753 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:07.211772 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:07.211780 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:07.211796 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:07.211808 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:07.211831 665623 retry.go:30] will retry after 473.074753ms: missing components: kube-dns
I0724 22:27:07.688922 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:07.688951 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:07.688958 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:07.688970 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:07.688978 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:07.688989 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:07.688996 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:07.689004 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:07.689024 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:07.689034 665623 retry.go:30] will retry after 587.352751ms: missing components: kube-dns
I0724 22:27:08.281467 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:08.281497 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:08.281506 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:08.281527 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:08.281553 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:08.281562 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:08.281571 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:08.281585 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:08.281612 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:08.281632 665623 retry.go:30] will retry after 834.206799ms: missing components: kube-dns
I0724 22:27:09.121210 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:09.121244 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:09.121253 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:09.121262 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:09.121268 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:09.121275 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:09.121280 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:09.121285 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:09.121292 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:09.121311 665623 retry.go:30] will retry after 746.553905ms: missing components: kube-dns
I0724 22:27:09.871851 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:09.871898 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:09.871906 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:09.871915 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:09.871928 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:09.871935 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:09.871946 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:09.871951 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:09.871959 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:09.871976 665623 retry.go:30] will retry after 987.362415ms: missing components: kube-dns
I0724 22:27:10.863743 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:10.863778 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:10.863785 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:10.863794 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:10.863800 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:10.863815 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:10.863832 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:10.863841 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:10.863862 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:10.863876 665623 retry.go:30] will retry after 1.189835008s: missing components: kube-dns
I0724 22:27:12.057716 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:12.057749 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:12.057755 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:12.057763 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:12.057769 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:12.057775 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:12.057780 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:12.057785 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:12.057793 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:12.057810 665623 retry.go:30] will retry after 1.677229867s: missing components: kube-dns
I0724 22:27:13.739146 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:13.739178 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:13.739185 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:13.739193 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:13.739200 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:13.739206 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:13.739211 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:13.739216 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:13.739236 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:13.739246 665623 retry.go:30] will retry after 2.346016261s: missing components: kube-dns
I0724 22:27:16.089782 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:16.089812 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:16.089819 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:16.089829 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:16.089835 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:16.089842 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:16.089848 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:16.089854 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:16.089869 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:16.089880 665623 retry.go:30] will retry after 3.36678925s: missing components: kube-dns
I0724 22:27:19.460534 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:19.460565 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:19.460571 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:19.460580 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:19.460586 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:19.460592 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:19.460598 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:19.460604 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:19.460611 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:19.460642 665623 retry.go:30] will retry after 3.11822781s: missing components: kube-dns
I0724 22:27:22.583465 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:22.583497 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:22.583504 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:22.583513 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:27:22.583523 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:22.583541 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:22.583546 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:22.583559 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:22.583564 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running
I0724 22:27:22.583573 665623 retry.go:30] will retry after 4.276119362s: missing components: kube-dns
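Each of these probes is a fresh list of the kube-system pods, checking that every expected component (here coredns, counted as kube-dns) is Running and Ready. A rough equivalent of one probe using client-go, assuming a reachable kubeconfig at the path the log mentions (an illustration only, not minikube's system_pods.go):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path taken from the log; any valid kubeconfig works.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d kube-system pods found\n", len(pods.Items))
	for _, p := range pods.Items {
		ready := false
		for _, c := range p.Status.Conditions {
			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
				ready = true
			}
		}
		// Mirrors the log's per-pod phase/readiness report.
		fmt.Printf("%q %s ready=%v\n", p.Name, p.Status.Phase, ready)
	}
}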
I0724 22:27:26.868665 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:26.868757 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:26.868785 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:26.868813 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:27:26.868831 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:26.868840 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:26.868848 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:26.868856 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:26.868876 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:26.868898 665623 retry.go:30] will retry after 5.167232101s: missing components: kube-dns
I0724 22:27:32.040694 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:32.040766 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:32.040773 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:32.040780 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:27:32.040788 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:32.040797 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:32.040805 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:32.040813 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:32.040833 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:32.040847 665623 retry.go:30] will retry after 6.994901864s: missing components: kube-dns
I0724 22:27:39.040136 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:39.040170 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:39.040177 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:39.040187 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:27:39.040195 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:39.040204 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:39.040216 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:39.040221 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:39.040229 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:39.040247 665623 retry.go:30] will retry after 7.91826225s: missing components: kube-dns
I0724 22:27:46.963499 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:46.963541 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:46.963548 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:46.963555 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:27:46.963562 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:46.963571 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:46.963579 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:46.963588 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:46.963601 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:46.963623 665623 retry.go:30] will retry after 9.953714808s: missing components: kube-dns
I0724 22:27:56.921293 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:27:56.921328 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:27:56.921334 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:27:56.921342 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:27:56.921347 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:27:56.921354 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:27:56.921358 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:27:56.921364 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:27:56.921370 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:27:56.921381 665623 retry.go:30] will retry after 15.120437328s: missing components: kube-dns
I0724 22:28:16.586842 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:28:18.842221 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:28:18.842245 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:28:18.842258 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:28:18.842275 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:28:18.842282 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:28:18.842287 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:28:18.842294 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:28:18.842302 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:28:18.842324 665623 retry.go:30] will retry after 14.90607158s: missing components: kube-dns
I0724 22:28:33.778612 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:28:33.778643 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:28:33.778650 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:28:33.778659 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:28:33.778665 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:28:33.778672 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:28:33.778677 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:28:33.778682 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:28:33.778689 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:28:33.778706 665623 retry.go:30] will retry after 18.465989061s: missing components: kube-dns
I0724 22:28:52.248957 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:28:52.248993 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:28:52.249000 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:28:52.249007 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:28:52.249012 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:28:52.249018 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:28:52.249023 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:28:52.249028 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:28:52.249035 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:28:52.249054 665623 retry.go:30] will retry after 25.219510332s: missing components: kube-dns
I0724 22:29:17.472624 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:29:17.472657 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:29:17.472664 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:29:17.472673 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:29:17.472680 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:29:17.472686 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:29:17.472691 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:29:17.472696 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:29:17.472703 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:29:17.472714 665623 retry.go:30] will retry after 35.078569648s: missing components: kube-dns
I0724 22:29:52.555291 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:29:52.555340 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:29:52.555347 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:29:52.555356 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:29:52.555362 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:29:52.555369 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:29:52.555374 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:29:52.555379 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:29:52.555386 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:29:52.555411 665623 retry.go:30] will retry after 50.027701973s: missing components: kube-dns
I0724 22:30:42.587416 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:30:42.587463 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:30:42.587471 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:30:42.587479 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:30:42.587486 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:30:42.587492 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:30:42.587496 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:30:42.587502 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:30:42.587511 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:30:42.587530 665623 retry.go:30] will retry after 47.463338706s: missing components: kube-dns
I0724 22:31:30.055285 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:31:30.055325 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:31:30.055331 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:31:30.055341 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:31:30.055347 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:31:30.055354 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:31:30.055358 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:31:30.055366 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:31:30.055372 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:31:30.055383 665623 retry.go:30] will retry after 53.912476906s: missing components: kube-dns
I0724 22:32:23.972497 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:32:23.972533 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:32:23.972541 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:32:23.972548 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running
I0724 22:32:23.972556 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:32:23.972563 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:32:23.972569 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:32:23.972575 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:32:23.972592 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:32:23.972605 665623 retry.go:30] will retry after 1m7.577191067s: missing components: kube-dns
I0724 22:33:31.555566 665623 system_pods.go:86] 8 kube-system pods found
I0724 22:33:31.555618 665623 system_pods.go:89] "coredns-5d4dd4b4db-9ssg6" [dbd8dc2c-1af2-4c99-89f0-d44686805742] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:33:31.555629 665623 system_pods.go:89] "etcd-crio-20200724220901-14997" [2a598f36-1d3e-4099-a544-319a0e8d8147] Running
I0724 22:33:31.555643 665623 system_pods.go:89] "kindnet-4qfcd" [9fb35a28-5601-47e4-88e6-be2a18fa55ef] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:33:31.555653 665623 system_pods.go:89] "kube-apiserver-crio-20200724220901-14997" [0bcd6051-f6dd-4389-835d-e5dc98ede131] Running
I0724 22:33:31.555665 665623 system_pods.go:89] "kube-controller-manager-crio-20200724220901-14997" [9216c440-48f4-426b-8d4c-06c6da411b8f] Running
I0724 22:33:31.555673 665623 system_pods.go:89] "kube-proxy-6wf4w" [208ac09e-2a8d-43bf-a2da-f91dc042aaef] Running
I0724 22:33:31.555682 665623 system_pods.go:89] "kube-scheduler-crio-20200724220901-14997" [463a5a27-eb31-4d31-9302-6a4646a51080] Running
I0724 22:33:31.555693 665623 system_pods.go:89] "storage-provisioner" [e76b38cb-d66f-4b4b-a962-cdd9c670e250] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:33:31.555840 665623 exit.go:58] WithError(failed to start node)=startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from:
goroutine 1 [running]:
runtime/debug.Stack(0x0, 0x0, 0x100000000000000)
	/home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d
k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc0007fa3e0)
	/home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34
k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc000878b60, 0x2, 0xd)
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505
github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc000878a90, 0xd, 0xd, 0x2cd0820, 0xc000878a90)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d
github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc000042b40)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349
github.com/spf13/cobra.(*Command).Execute(...)
	/home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887
k8s.io/minikube/cmd/minikube/cmd.Execute()
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c
main.main()
	/home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f
W0724 22:33:31.556033 665623 out.go:249] failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns
* X failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns
*
* minikube is exiting due to an error. If the above message is not useful, open an issue:
  - https://github.com/kubernetes/minikube/issues/new/choose

** /stderr **
start_stop_delete_test.go:193: failed to start minikube post-stop.
args "./minikube-linux-amd64 start -p crio-20200724220901-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=crio --disable-driver-mounts --extra-config=kubeadm.ignore-preflight-errors=SystemVerification --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.15.7": exit status 70 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/SecondStart]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 667119, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:26:07.35920725Z", "FinishedAt": "2020-07-24T22:26:00.416732195Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, 
"MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" }, { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": 
null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32916" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32915" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32914" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32913" } ] }, "SandboxKey": "/var/run/docker/netns/5953f0ce29bf", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/SecondStart FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/SecondStart]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/crio/serial/SecondStart logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:33:32 UTC. 
-- * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.319207409Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.323918512Z" level=info msg="Got pod network &{Name:kubernetes-dashboard-6979c57f4c-wbxrt Namespace:kubernetes-dashboard ID:c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4 NetNS:/proc/16659/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.323969816Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.323981516Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.402371660Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.186 -j CNI-8c5b403c06d8e85fd3c788db -m comment --comment name: \"crio-bridge\" id: \"c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-8c5b403c06d8e85fd3c788db':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.402444964Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.186 -j CNI-8c5b403c06d8e85fd3c788db -m comment --comment name: \"crio-bridge\" id: \"c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-8c5b403c06d8e85fd3c788db':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:33:24 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:24.402546371Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.186 -j CNI-8c5b403c06d8e85fd3c788db -m comment --comment name: \"crio-bridge\" id: \"c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-8c5b403c06d8e85fd3c788db':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=45faa062-93b8-4879-a675-1c1b05cec029 * Jul 24 22:33:25 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:25.744655619Z" level=info msg="attempting to run pod sandbox with infra container: default/busybox/POD" id=eb16d5f8-4cae-420e-b78e-3034b9712ada * Jul 24 22:33:25 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:25.911534856Z" level=info msg="About to add CNI network lo (type=loopback)" * Jul 24 22:33:25 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:25.915491111Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b NetNS:/proc/16823/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: 
IpRanges:[]}]}" * Jul 24 22:33:25 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:25.915533613Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:33:27 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:27.506260649Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=77858e33-3606-405e-96e4-44b696164d0c * Jul 24 22:33:27 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:27.998682526Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:27 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:27.998735730Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:27 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:27.998901441Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:33:28 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:28.003056008Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b NetNS:/proc/16823/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:33:28 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:28.003108311Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:33:28 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:28.003121112Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:33:28 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:28.065528927Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.187 -j CNI-d0381890f5e988059e7de75c -m comment --comment name: \"crio-bridge\" id: \"3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-d0381890f5e988059e7de75c':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:33:28 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:28.065633533Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.187 -j CNI-d0381890f5e988059e7de75c -m comment --comment name: \"crio-bridge\" id: \"3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-d0381890f5e988059e7de75c':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:33:28 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:28.065725739Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.0.187 -j CNI-d0381890f5e988059e7de75c -m comment --comment name: \"crio-bridge\" id: 
\"3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-d0381890f5e988059e7de75c':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=eb16d5f8-4cae-420e-b78e-3034b9712ada * Jul 24 22:33:31 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:31.744566700Z" level=info msg="attempting to run pod sandbox with infra container: kubernetes-dashboard/dashboard-metrics-scraper-c8b69c96c-ljbr4/POD" id=2899583a-181a-41b3-a060-10d5ca978ce5 * Jul 24 22:33:32 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:32.118840378Z" level=info msg="About to add CNI network lo (type=loopback)" * Jul 24 22:33:32 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:32.123021147Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:9194c32a4f5e5d716969721ed65699d323967bb56eeb71c70b21395c4eecdb4b NetNS:/proc/16958/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:33:32 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:33:32.123065549Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 8de37e3749db7 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 26 seconds ago Exited storage-provisioner 5 ed36006be83da * 18ae74251fd32 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 About a minute ago Exited kindnet-cni 5 22d8d62f2f19c * 4e28cb6807125 ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 6 minutes ago Running kube-proxy 0 8b8f6c79bb715 * 5e2ae26239902 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 6 minutes ago Running kube-controller-manager 0 f5860ba3909ed * 9881d2c304cdc 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 6 minutes ago Running kube-scheduler 0 4b2a96d2f6659 * 7cd3d2c27ce8d 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 6 minutes ago Running etcd 0 4187a344080e1 * 60aac29a1caea c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 6 minutes ago Running kube-apiserver 0 79ca654167789 * * ==> describe nodes <== * Name: crio-20200724220901-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=crio-20200724220901-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=crio-20200724220901-14997 * minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000 * Taints: * Unschedulable: false * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:33:03 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:33:03 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:33:03 +0000 Fri, 24 
Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:33:03 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.2 * Hostname: crio-20200724220901-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: e7c76c839aa944e99c5c76ea1345e361 * System UUID: 8677386b-5379-4ccc-90e7-5b585098762e * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: cri-o://1.17.3 * Kubelet Version: v1.15.7 * Kube-Proxy Version: v1.15.7 * PodCIDR: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 16m * kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 22m * kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 22m * kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 21m * kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 6m31s * kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 21m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kubernetes-dashboard dashboard-metrics-scraper-c8b69c96c-ljbr4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m16s * kubernetes-dashboard kubernetes-dashboard-6979c57f4c-wbxrt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m16s * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 23m (x7 over 23m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 23m (x7 over 23m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 23m (x7 over 23m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 22m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 22m kube-proxy, crio-20200724220901-14997 Starting kube-proxy. * Normal Starting 6m41s kubelet, crio-20200724220901-14997 Starting kubelet. 
* Warning SystemOOM 6m41s (x2 over 6m41s) kubelet, crio-20200724220901-14997 System OOM encountered * Normal NodeHasSufficientMemory 6m40s (x7 over 6m41s) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 6m40s (x7 over 6m41s) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 6m40s (x7 over 6m41s) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 6m40s kubelet, crio-20200724220901-14997 Updated Node Allocatable limit across pods * Warning readOnlySysFS 6m30s kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 6m30s kube-proxy, crio-20200724220901-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <== * 2020-07-24 22:28:20.692839 W | wal: sync duration of 4.117351935s, expected less than 1s * 2020-07-24 22:28:20.693031 W | etcdserver: read-only range request "key:\"/registry/cronjobs/\" range_end:\"/registry/cronjobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (4.1699111s) to execute * 2020-07-24 22:28:20.693347 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693461 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693501 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693514 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute * 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute * 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute * 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute * 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute * 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute * 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute * 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute * 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute * 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute * 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" 
limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute * 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute * 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute * 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s * 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.514657725s) to execute * 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute * * ==> kernel <== * 22:33:32 up 1:00, 0 users, load average: 1.67, 5.86, 7.26 * Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <== * I0724 22:31:56.143953 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:31:56.144054 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:31:56.152647 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.143859 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:16.144655 1 
asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * * ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <== * I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager * I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller * W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp. * I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal. * I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller * I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller * I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller * I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller * I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server. * I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller * I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller * I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller * I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller * I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller * I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller * I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1 * I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1 * I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4 * I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", 
APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt * I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller * I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller * * ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <== * I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier. * I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7 * I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:10:51.749315 1 config.go:187] Starting service config controller * I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller * I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller * I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller * W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy * I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier. 
* I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7 * I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller * I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:27:02.438814 1 config.go:187] Starting service config controller * I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller * I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller * * ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <== * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory * W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. 
* W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. * W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. * I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7 * I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled * W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled * I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found] * E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, 
clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found] * E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found] * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:33:32 UTC. -- * Jul 24 22:33:09 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:09.759193 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(755647f943fc43d106ebde81ad942ba796c3f845f8c03a6daf5a74de8fc5b2d2): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:14 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:14.744520 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:33:15 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:15.300665 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(61c043b072725e9c2daf96c0719bd635b81323bb39a1040320876024abb3c846): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:15 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:15.300733 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(61c043b072725e9c2daf96c0719bd635b81323bb39a1040320876024abb3c846): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:15 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:15.300772 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(61c043b072725e9c2daf96c0719bd635b81323bb39a1040320876024abb3c846): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:15 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:15.300853 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(61c043b072725e9c2daf96c0719bd635b81323bb39a1040320876024abb3c846): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:17 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:17.490727 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(2b6726933ec42128ef91d94673c333155b998407ddcbae5acb18f7b583c71cba): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:17 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:17.490786 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(2b6726933ec42128ef91d94673c333155b998407ddcbae5acb18f7b583c71cba): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:17 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:17.490810 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(2b6726933ec42128ef91d94673c333155b998407ddcbae5acb18f7b583c71cba): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:17 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:17.490867 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod 
\"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(2b6726933ec42128ef91d94673c333155b998407ddcbae5acb18f7b583c71cba): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:20 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:20.744179 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.275896 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(fcedd3487c26a4215b1d430680a154e40c17b3afde34bbae69ecc2b7854dc485): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.275961 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(fcedd3487c26a4215b1d430680a154e40c17b3afde34bbae69ecc2b7854dc485): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.275984 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(fcedd3487c26a4215b1d430680a154e40c17b3afde34bbae69ecc2b7854dc485): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.276041 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(fcedd3487c26a4215b1d430680a154e40c17b3afde34bbae69ecc2b7854dc485): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.599237 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.599306 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.599337 1598 kuberuntime_manager.go:692] createPodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:24.599403 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(c03849839decf2938363cc1f276511daf07a78d1e7354cb9acbc4eded4a7f4d4): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:27 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:27.744200 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 2m40s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:33:28 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:28.263997 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:28 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:28.264081 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b): failed to set bridge 
addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:28 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:28.264112 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:33:28 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:28.264186 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:33:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:33:32.744053 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * * ==> storage-provisioner [8de37e3749db70c2f4f86f0614bc7cffa296c86ac46648b153437530d22f2f92] <== * F0724 22:33:09.128648 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/SecondStart]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (84.465534ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:17:30 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro) Conditions: Type Status Initialized True Ready 
False
  ContainersReady   False
  PodScheduled      True
Volumes:
  default-token-2jsfl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-2jsfl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason                  Age                   From                                Message
  ----     ------                  ----                  ----                                -------
  Normal   Scheduled               16m                   default-scheduler                   Successfully assigned default/busybox to crio-20200724220901-14997
  Warning  FailedCreatePodSandBox  16m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  15m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  15m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  15m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  15m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  14m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  14m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  14m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  14m                   kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  9m46s (x17 over 13m)  kubelet, crio-20200724220901-14997  (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6m31s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message
  Warning  FailedCreatePodSandBox  6m15s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6m                    kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  5m42s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  5m10s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  4m52s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  4m36s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  4m21s                 kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  4m4s                  kubelet, crio-20200724220901-14997  Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  6s (x16 over 3m52s)   kubelet, crio-20200724220901-14997  (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(3f28adcb0e33adbc7a35e05c2439fa1b47515d04ad1fb5bf8a947281c0eae34b): failed to set bridge addr: could not add IP address to "cni0": permission denied

-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found
Error from server (NotFound): pods "kubernetes-dashboard-6979c57f4c-wbxrt" not found
** /stderr **
helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1
=== RUN TestStartStop/group/crio/serial/UserAppExistsAfterStop
start_stop_delete_test.go:208: (dbg) TestStartStop/group/crio/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:332: "kubernetes-dashboard-6979c57f4c-wbxrt" [2734d7c0-daef-420d-966b-49f33f94e547] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
=== CONT TestStartStop/group/containerd/serial/SecondStart
start_stop_delete_test.go:190: (dbg) Non-zero exit: ./minikube-linux-amd64 start -p containerd-20200724221200-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --apiserver-port=8444 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3: exit status 70 (8m5.525003845s)
-- stdout --
* [containerd-20200724221200-14997] minikube v1.12.1 on Ubuntu 20.04
  - KUBECONFIG=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
  - MINIKUBE_HOME=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome
* Using the docker driver based on existing profile
* Starting control plane node containerd-20200724221200-14997 in cluster containerd-20200724221200-14997
* Pulling base image ...
* Restarting existing docker container for "containerd-20200724221200-14997" ...
* Preparing Kubernetes v1.18.3 on containerd 1.3.3-14-g449e9269 ...
  - opt containerd=/var/run/containerd/containerd.sock
  - Jul 24 22:28:57 containerd-20200724221200-14997 kubelet[544]: E0724 22:28:57.401527 544 pod_workers.go:191] Error syncing pod fbf2e82d7389c2cc92ba5b8449d13b18 ("kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"
  - Jul 24 22:28:59 containerd-20200724221200-14997 kubelet[544]: E0724 22:28:59.610920 544 pod_workers.go:191] Error syncing pod fbf2e82d7389c2cc92ba5b8449d13b18 ("kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"
* Configuring CNI (Container Networking Interface) ...
* Verifying Kubernetes components...
* Enabled addons: dashboard, default-storageclass, storage-provisioner
-- /stdout --
** stderr **
I0724 22:27:48.211367 706086 out.go:188] Setting JSON to false
I0724 22:27:48.215625 706086 start.go:101] hostinfo: {"hostname":"mini-test-11-ubuntu","uptime":3307,"bootTime":1595626361,"procs":853,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.4.0-1022-azure","virtualizationSystem":"kvm","virtualizationRole":"host","hostid":"c95cb721-f5cd-cb47-980f-2a6f7a0ad6b2"}
I0724 22:27:48.216415 706086 start.go:111] virtualization: kvm host
I0724 22:27:48.235289 706086 notify.go:125] Checking for updates...
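Both failure signatures in this run can be inspected directly on the kic node containers. A diagnostic sketch, assuming the profile names from this log and the tools that ship in the kicbase image (ip, crictl); the final argument of the last command is a placeholder for whatever id the previous command prints:

  # state of the cni0 bridge behind the crio run's "could not add IP address" errors
  docker exec crio-20200724220901-14997 ip addr show cni0
  # recent output of the crash-looping controller manager on the containerd node
  docker exec containerd-20200724221200-14997 crictl ps -a --name=kube-controller-manager --quiet
  docker exec containerd-20200724221200-14997 crictl logs <id-from-previous-command>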
I0724 22:27:48.248922 706086 driver.go:287] Setting default libvirt URI to qemu:///system
I0724 22:27:48.312408 706086 docker.go:87] docker version: linux-19.03.8
I0724 22:27:48.323120 706086 start.go:217] selected driver: docker
I0724 22:27:48.323130 706086 start.go:623] validating driver "docker" against &{Name:containerd-20200724221200-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[containerd=/var/run/containerd/containerd.sock] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} Nodes:[{Name: IP:172.17.0.5 Port:8444 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[dashboard:true] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]}
I0724 22:27:48.323231 706086 start.go:634] status for docker: {Installed:true Healthy:true NeedsImprovement:false Error:<nil> Fix: Doc:}
I0724 22:27:48.323314 706086 cli_runner.go:109] Run: docker system info --format "{{json .}}"
I0724 22:27:48.415977 706086 start_flags.go:617] Waiting for all components: map[apiserver:true apps_running:true default_sa:true system_pods:true]
I0724 22:27:48.416031 706086 start_flags.go:345] config: {Name:containerd-20200724221200-14997 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:local/kicbase:-snapshot Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[containerd=/var/run/containerd/containerd.sock] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} Nodes:[{Name: IP:172.17.0.5 Port:8444 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[dashboard:true] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]}
I0724 22:27:48.484446 706086 cache.go:117] Beginning downloading kic base image for docker with containerd
I0724 22:27:48.492660 706086 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime containerd
I0724 22:27:48.492701 706086 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4
I0724 22:27:48.492713 706086 cache.go:51] Caching tarball of preloaded images
I0724 22:27:48.492725 706086 preload.go:131] Found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0724 22:27:48.492738 706086 cache.go:54] Finished verifying existence of preloaded tar for v1.18.3 on containerd
I0724 22:27:48.492808 706086 cache.go:137] Downloading local/kicbase:-snapshot to local daemon
I0724 22:27:48.492835 706086 image.go:140] Writing local/kicbase:-snapshot to local daemon
I0724 22:27:48.493310 706086 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/config.json ...
I0724 22:27:48.838850 706086 cache.go:151] failed to download local/kicbase:-snapshot, will try fallback image if available: getting remote image: GET https://index.docker.io/v2/local/kicbase/manifests/-snapshot: unsupported status code 404; body: 404 page not found
I0724 22:27:48.838921 706086 cache.go:137] Downloading kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon
I0724 22:27:48.838930 706086 image.go:140] Writing kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 to local daemon
I0724 22:27:53.189168 706086 cache.go:140] successfully downloaded kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438
! minikube was unable to download local/kicbase:-snapshot, but successfully downloaded kicbase/stable:v0.0.10 as a fallback image
I0724 22:27:53.189236 706086 cache.go:178] Successfully downloaded all kic artifacts
I0724 22:27:53.189277 706086 start.go:241] acquiring machines lock for containerd-20200724221200-14997: {Name:mk91e0531dbac8bf0bab062b1deb1a07bd4532ce Clock:{} Delay:500ms Timeout:15m0s Cancel:<nil>}
I0724 22:27:53.189490 706086 start.go:245] acquired machines lock for "containerd-20200724221200-14997" in 150.91µs
I0724 22:27:53.189524 706086 start.go:89] Skipping create...Using existing machine configuration
I0724 22:27:53.189531 706086 fix.go:53] fixHost starting:
I0724 22:27:53.189933 706086 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}}
I0724 22:27:53.245028 706086 fix.go:105] recreateIfNeeded on containerd-20200724221200-14997: state=Stopped err=<nil>
W0724 22:27:53.245056 706086 fix.go:131] unexpected machine state, will restart: <nil>
I0724 22:27:53.318647 706086 cli_runner.go:109] Run: docker start containerd-20200724221200-14997
I0724 22:27:53.897515 706086 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}}
I0724 22:27:53.951738 706086 kic.go:330] container "containerd-20200724221200-14997" state is running.
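The 404 above only means the locally built base image was never pushed to a registry, so minikube falls back to the pinned kicbase/stable digest. A quick sketch to confirm which image the restarted node container actually runs (standard docker CLI on the host, profile name taken from this log):

  docker inspect -f '{{.Config.Image}}' containerd-20200724221200-14997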
I0724 22:27:53.952162 706086 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" containerd-20200724221200-14997 I0724 22:27:54.018362 706086 profile.go:150] Saving config to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/config.json ... I0724 22:27:54.018542 706086 machine.go:88] provisioning docker machine ... I0724 22:27:54.018562 706086 ubuntu.go:166] provisioning hostname "containerd-20200724221200-14997" I0724 22:27:54.018630 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:27:54.089588 706086 main.go:115] libmachine: Using SSH client type: native I0724 22:27:54.090427 706086 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32920 } I0724 22:27:54.090472 706086 main.go:115] libmachine: About to run SSH command: sudo hostname containerd-20200724221200-14997 && echo "containerd-20200724221200-14997" | sudo tee /etc/hostname I0724 22:27:54.091387 706086 main.go:115] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:54578->127.0.0.1:32920: read: connection reset by peer I0724 22:27:57.224531 706086 main.go:115] libmachine: SSH cmd err, output: : containerd-20200724221200-14997 I0724 22:27:57.224603 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:27:57.275462 706086 main.go:115] libmachine: Using SSH client type: native I0724 22:27:57.275633 706086 main.go:115] libmachine: &{{{ 0 [] [] []} docker [0x7b99a0] 0x7b9970 [] 0s} 127.0.0.1 32920 } I0724 22:27:57.275659 706086 main.go:115] libmachine: About to run SSH command: if ! 
grep -xq '.*\scontainerd-20200724221200-14997' /etc/hosts; then if grep -xq '127.0.1.1\s.*' /etc/hosts; then sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 containerd-20200724221200-14997/g' /etc/hosts; else echo '127.0.1.1 containerd-20200724221200-14997' | sudo tee -a /etc/hosts; fi fi I0724 22:27:57.401386 706086 main.go:115] libmachine: SSH cmd err, output: : I0724 22:27:57.401415 706086 ubuntu.go:172] set auth options {CertDir:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube CaCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube} I0724 22:27:57.401433 706086 ubuntu.go:174] setting up certificates I0724 22:27:57.401441 706086 provision.go:82] configureAuth start I0724 22:27:57.401493 706086 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" containerd-20200724221200-14997 I0724 22:27:57.491318 706086 provision.go:131] copyHostCerts I0724 22:27:57.491389 706086 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem, removing ... I0724 22:27:57.491452 706086 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/key.pem (1675 bytes) I0724 22:27:57.491529 706086 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem, removing ... I0724 22:27:57.491560 706086 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.pem (1038 bytes) I0724 22:27:57.491614 706086 exec_runner.go:91] found /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem, removing ... 
I0724 22:27:57.491642 706086 exec_runner.go:98] cp: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem --> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cert.pem (1078 bytes) I0724 22:27:57.491686 706086 provision.go:105] generating server cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem ca-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem private-key=/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem org=jenkins.containerd-20200724221200-14997 san=[172.17.0.3 localhost 127.0.0.1] I0724 22:27:57.941432 706086 provision.go:159] copyRemoteCerts I0724 22:27:57.941528 706086 ssh_runner.go:148] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker I0724 22:27:57.941599 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:27:57.997399 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:27:58.089970 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1038 bytes) I0724 22:27:58.113630 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server.pem --> /etc/docker/server.pem (1151 bytes) I0724 22:27:58.137515 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes) I0724 22:27:58.160817 706086 provision.go:85] duration metric: configureAuth took 759.359136ms I0724 22:27:58.160840 706086 ubuntu.go:190] setting minikube options for container-runtime I0724 22:27:58.160980 706086 machine.go:91] provisioned docker machine in 4.142424095s I0724 22:27:58.160994 706086 start.go:204] post-start starting for "containerd-20200724221200-14997" (driver="docker") I0724 22:27:58.161002 706086 start.go:214] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] I0724 22:27:58.161056 706086 ssh_runner.go:148] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs I0724 22:27:58.161102 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:27:58.214186 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:27:58.306124 706086 ssh_runner.go:148] Run: cat /etc/os-release I0724 22:27:58.309568 706086 main.go:115] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found I0724 22:27:58.309593 706086 main.go:115] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found I0724 
22:27:58.309604 706086 main.go:115] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found I0724 22:27:58.309610 706086 info.go:98] Remote host: Ubuntu 19.10 I0724 22:27:58.309619 706086 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/addons for local assets ... I0724 22:27:58.309703 706086 filesync.go:118] Scanning /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/files for local assets ... I0724 22:27:58.309810 706086 start.go:207] post-start completed in 148.808285ms I0724 22:27:58.309822 706086 fix.go:55] fixHost completed within 5.120292955s I0724 22:27:58.309827 706086 start.go:76] releasing machines lock for "containerd-20200724221200-14997", held for 5.120319456s I0724 22:27:58.309888 706086 cli_runner.go:109] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" containerd-20200724221200-14997 I0724 22:27:58.362389 706086 ssh_runner.go:148] Run: systemctl --version I0724 22:27:58.362447 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:27:58.362449 706086 ssh_runner.go:148] Run: curl -sS -m 2 https://k8s.gcr.io/ I0724 22:27:58.362548 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997 I0724 22:27:58.413944 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:27:58.415535 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker} I0724 22:27:58.501453 706086 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service crio I0724 22:27:58.567984 706086 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service docker I0724 22:27:58.579036 706086 ssh_runner.go:148] Run: sudo systemctl stop -f docker I0724 22:27:58.597216 706086 ssh_runner.go:148] Run: sudo systemctl is-active --quiet service docker I0724 22:27:58.607820 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock image-endpoint: unix:///run/containerd/containerd.sock " | sudo tee /etc/crictl.yaml" I0724 22:27:58.623342 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo mkdir -p /etc/containerd && printf %s 
"cm9vdCA9ICIvdmFyL2xpYi9jb250YWluZXJkIgpzdGF0ZSA9ICIvcnVuL2NvbnRhaW5lcmQiCm9vbV9zY29yZSA9IDAKCltncnBjXQogIGFkZHJlc3MgPSAiL3J1bi9jb250YWluZXJkL2NvbnRhaW5lcmQuc29jayIKICB1aWQgPSAwCiAgZ2lkID0gMAogIG1heF9yZWN2X21lc3NhZ2Vfc2l6ZSA9IDE2Nzc3MjE2CiAgbWF4X3NlbmRfbWVzc2FnZV9zaXplID0gMTY3NzcyMTYKCltkZWJ1Z10KICBhZGRyZXNzID0gIiIKICB1aWQgPSAwCiAgZ2lkID0gMAogIGxldmVsID0gIiIKClttZXRyaWNzXQogIGFkZHJlc3MgPSAiIgogIGdycGNfaGlzdG9ncmFtID0gZmFsc2UKCltjZ3JvdXBdCiAgcGF0aCA9ICIiCgpbcGx1Z2luc10KICBbcGx1Z2lucy5jZ3JvdXBzXQogICAgbm9fcHJvbWV0aGV1cyA9IGZhbHNlCiAgW3BsdWdpbnMuY3JpXQogICAgc3RyZWFtX3NlcnZlcl9hZGRyZXNzID0gIiIKICAgIHN0cmVhbV9zZXJ2ZXJfcG9ydCA9ICIxMDAxMCIKICAgIGVuYWJsZV9zZWxpbnV4ID0gZmFsc2UKICAgIHNhbmRib3hfaW1hZ2UgPSAiazhzLmdjci5pby9wYXVzZTozLjIiCiAgICBzdGF0c19jb2xsZWN0X3BlcmlvZCA9IDEwCiAgICBzeXN0ZW1kX2Nncm91cCA9IGZhbHNlCiAgICBlbmFibGVfdGxzX3N0cmVhbWluZyA9IGZhbHNlCiAgICBtYXhfY29udGFpbmVyX2xvZ19saW5lX3NpemUgPSAxNjM4NAogICAgW3BsdWdpbnMuY3JpLmNvbnRhaW5lcmRdCiAgICAgIHNuYXBzaG90dGVyID0gIm92ZXJsYXlmcyIKICAgICAgbm9fcGl2b3QgPSB0cnVlCiAgICAgIFtwbHVnaW5zLmNyaS5jb250YWluZXJkLmRlZmF1bHRfcnVudGltZV0KICAgICAgICBydW50aW1lX3R5cGUgPSAiaW8uY29udGFpbmVyZC5ydW50aW1lLnYxLmxpbnV4IgogICAgICAgIHJ1bnRpbWVfZW5naW5lID0gIiIKICAgICAgICBydW50aW1lX3Jvb3QgPSAiIgogICAgICBbcGx1Z2lucy5jcmkuY29udGFpbmVyZC51bnRydXN0ZWRfd29ya2xvYWRfcnVudGltZV0KICAgICAgICBydW50aW1lX3R5cGUgPSAiIgogICAgICAgIHJ1bnRpbWVfZW5naW5lID0gIiIKICAgICAgICBydW50aW1lX3Jvb3QgPSAiIgogICAgW3BsdWdpbnMuY3JpLmNuaV0KICAgICAgYmluX2RpciA9ICIvb3B0L2NuaS9iaW4iCiAgICAgIGNvbmZfZGlyID0gIi9ldGMvY25pL25ldC5kIgogICAgICBjb25mX3RlbXBsYXRlID0gIiIKICAgIFtwbHVnaW5zLmNyaS5yZWdpc3RyeV0KICAgICAgW3BsdWdpbnMuY3JpLnJlZ2lzdHJ5Lm1pcnJvcnNdCiAgICAgICAgW3BsdWdpbnMuY3JpLnJlZ2lzdHJ5Lm1pcnJvcnMuImRvY2tlci5pbyJdCiAgICAgICAgICBlbmRwb2ludCA9IFsiaHR0cHM6Ly9yZWdpc3RyeS0xLmRvY2tlci5pbyJdCiAgW3BsdWdpbnMuZGlmZi1zZXJ2aWNlXQogICAgZGVmYXVsdCA9IFsid2Fsa2luZyJdCiAgW3BsdWdpbnMubGludXhdCiAgICBzaGltID0gImNvbnRhaW5lcmQtc2hpbSIKICAgIHJ1bnRpbWUgPSAicnVuYyIKICAgIHJ1bnRpbWVfcm9vdCA9ICIiCiAgICBub19zaGltID0gZmFsc2UKICAgIHNoaW1fZGVidWcgPSBmYWxzZQogIFtwbHVnaW5zLnNjaGVkdWxlcl0KICAgIHBhdXNlX3RocmVzaG9sZCA9IDAuMDIKICAgIGRlbGV0aW9uX3RocmVzaG9sZCA9IDAKICAgIG11dGF0aW9uX3RocmVzaG9sZCA9IDEwMAogICAgc2NoZWR1bGVfZGVsYXkgPSAiMHMiCiAgICBzdGFydHVwX2RlbGF5ID0gIjEwMG1zIgo=" | base64 -d | sudo tee /etc/containerd/config.toml" I0724 22:27:58.641304 706086 ssh_runner.go:148] Run: sudo sysctl net.bridge.bridge-nf-call-iptables I0724 22:27:58.649246 706086 ssh_runner.go:148] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward" I0724 22:27:58.657861 706086 ssh_runner.go:148] Run: sudo systemctl daemon-reload I0724 22:27:58.772277 706086 ssh_runner.go:148] Run: sudo systemctl restart containerd I0724 22:27:58.800203 706086 ssh_runner.go:148] Run: containerd --version I0724 22:27:58.886428 706086 cli_runner.go:109] Run: docker network ls --filter name=bridge --format {{.ID}} I0724 22:27:58.941224 706086 cli_runner.go:109] Run: docker network inspect --format "{{(index .IPAM.Config 0).Gateway}}" d4a420189740 I0724 22:27:58.991615 706086 network.go:77] got host ip for mount in container by inspect docker network: 172.17.0.1 I0724 22:27:58.991687 706086 ssh_runner.go:148] Run: grep 172.17.0.1 host.minikube.internal$ /etc/hosts I0724 22:27:58.996019 706086 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\thost.minikube.internal$' /etc/hosts; echo "172.17.0.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts" I0724 22:27:59.011081 706086 preload.go:97] Checking if preload exists for k8s version v1.18.3 and runtime containerd I0724 
22:27:59.011112 706086 preload.go:105] Found local preload: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4
I0724 22:27:59.011169 706086 ssh_runner.go:148] Run: sudo crictl images --output json
I0724 22:27:59.029972 706086 ssh_runner.go:148] Run: which lz4
I0724 22:27:59.033909 706086 ssh_runner.go:148] Run: stat -c "%s %y" /preloaded.tar.lz4
I0724 22:27:59.037489 706086 ssh_runner.go:205] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:

stderr:
stat: cannot stat '/preloaded.tar.lz4': No such file or directory
I0724 22:27:59.037513 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v4-v1.18.3-containerd-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (1007816108 bytes)
I0724 22:28:01.114267 706086 containerd.go:345] Took 2.080411 seconds to copy over tarball
I0724 22:28:01.114336 706086 ssh_runner.go:148] Run: sudo tar -I lz4 -C /var -xvf /preloaded.tar.lz4
I0724 22:28:07.312065 706086 ssh_runner.go:188] Completed: sudo tar -I lz4 -C /var -xvf /preloaded.tar.lz4: (6.197704861s)
I0724 22:28:07.312091 706086 containerd.go:352] Took 6.197800 seconds to extract the tarball
I0724 22:28:07.312101 706086 ssh_runner.go:99] rm: /preloaded.tar.lz4
I0724 22:28:07.381221 706086 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:28:07.469803 706086 ssh_runner.go:148] Run: sudo systemctl restart containerd
I0724 22:28:07.495792 706086 ssh_runner.go:148] Run: sudo crictl images --output json
I0724 22:28:08.783377 706086 ssh_runner.go:188] Completed: sudo crictl images --output json: (1.287550897s)
I0724 22:28:08.783559 706086 containerd.go:412] all images are preloaded for containerd runtime.
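The preload path avoids pulling each image individually: the tarball is copied into the node, untarred over /var, containerd is restarted, and crictl then reports every Kubernetes image as already present. A sketch to list the same images by hand, assuming the ctr binary from the kicbase image and containerd's k8s.io namespace:

  docker exec containerd-20200724221200-14997 ctr -n k8s.io images ls -q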
I0724 22:28:08.783572 706086 cache_images.go:69] Images are preloaded, skipping loading
I0724 22:28:08.783623 706086 ssh_runner.go:148] Run: sudo crictl info
I0724 22:28:08.800850 706086 cni.go:74] Creating CNI manager for ""
I0724 22:28:08.800869 706086 cni.go:105] "docker" driver + containerd runtime found, recommending kindnet
I0724 22:28:08.800878 706086 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0724 22:28:08.800890 706086 kubeadm.go:150] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.17.0.3 APIServerPort:8444 KubernetesVersion:v1.18.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:containerd-20200724221200-14997 NodeName:containerd-20200724221200-14997 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.17.0.3"]]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:172.17.0.3 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0724 22:28:08.800995 706086 kubeadm.go:154] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.17.0.3
  bindPort: 8444
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "containerd-20200724221200-14997"
  kubeletExtraArgs:
    node-ip: 172.17.0.3
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "172.17.0.3"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
controllerManager:
  extraArgs:
    "leader-elect": "false"
scheduler:
  extraArgs:
    "leader-elect": "false"
kubernetesVersion: v1.18.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 172.17.0.3:10249
I0724 22:28:08.801068 706086 kubeadm.go:790] kubelet
[Unit]
Wants=containerd.service

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=containerd-20200724221200-14997 --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=172.17.0.3 --runtime-request-timeout=15m

[Install]
 config: {KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:}
I0724 22:28:08.801123 706086 ssh_runner.go:148] Run: sudo ls /var/lib/minikube/binaries/v1.18.3
I0724 22:28:08.809722 706086 binaries.go:43] Found k8s binaries, skipping transfer
I0724 22:28:08.809784 706086 ssh_runner.go:148] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0724 22:28:08.818484 706086 ssh_runner.go:215] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (543 bytes)
I0724 22:28:08.841464 706086 ssh_runner.go:215] scp memory --> /lib/systemd/system/kubelet.service (349 bytes)
I0724 22:28:08.861811 706086 ssh_runner.go:215] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (1786 bytes)
I0724 22:28:08.881795 706086 ssh_runner.go:148] Run: grep 172.17.0.3 control-plane.minikube.internal$ /etc/hosts
I0724 22:28:08.885229 706086 ssh_runner.go:148] Run: /bin/bash -c "{ grep -v '\tcontrol-plane.minikube.internal$' /etc/hosts; echo "172.17.0.3 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ /etc/hosts"
I0724 22:28:08.970161 706086 ssh_runner.go:148] Run: sudo systemctl daemon-reload
I0724 22:28:09.043038 706086 ssh_runner.go:148] Run: sudo systemctl start kubelet
I0724 22:28:09.057947 706086 certs.go:52] Setting up /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997 for IP: 172.17.0.3
I0724 22:28:09.058002 706086 certs.go:169] skipping minikubeCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key
I0724 22:28:09.058021 706086 certs.go:169] skipping proxyClientCA CA generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key
I0724 22:28:09.058095 706086 certs.go:269] skipping minikube-user signed cert generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/client.key
I0724 22:28:09.058125 706086 certs.go:273] generating minikube signed cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.0f3e66d0
I0724 22:28:09.058131 706086 crypto.go:69] Generating cert /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.0f3e66d0 with IP's: [172.17.0.3 10.96.0.1 127.0.0.1 10.0.0.1]
I0724 22:28:09.282158 706086 crypto.go:157] Writing cert to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.0f3e66d0 ...
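The regenerated apiserver certificate is keyed to the node IP (172.17.0.3 here) plus the service VIP and loopback, which is exactly what an IP change invalidates. A hedged check of the SANs in the freshly written cert, using the profile path from this log:

  openssl x509 -noout -text -in /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt | grep -A1 'Subject Alternative Name'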
I0724 22:28:09.282192 706086 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.0f3e66d0: {Name:mk1b296a4d7414df948e6006fd67ab3c6ee7cbc0 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:28:09.282429 706086 crypto.go:165] Writing key to /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.0f3e66d0 ... I0724 22:28:09.282453 706086 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.0f3e66d0: {Name:mk1fc15b6c86c87cdf39af96dff351c6f2f93eb3 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0724 22:28:09.282580 706086 certs.go:284] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt.0f3e66d0 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt I0724 22:28:09.564429 706086 certs.go:288] copying /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key.0f3e66d0 -> /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key I0724 22:28:09.668429 706086 certs.go:269] skipping aggregator signed cert generation: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.key I0724 22:28:09.668578 706086 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem (1338 bytes) W0724 22:28:09.668724 706086 certs.go:344] ignoring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997_empty.pem, impossibly tiny 0 bytes I0724 22:28:09.668736 706086 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca-key.pem (1675 bytes) I0724 22:28:09.668767 706086 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/ca.pem (1038 bytes) I0724 22:28:09.668799 706086 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/cert.pem (1078 bytes) I0724 22:28:09.668860 706086 certs.go:348] found cert: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/key.pem (1675 bytes) I0724 22:28:09.669775 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1350 bytes) I0724 22:28:09.694923 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/apiserver.key --> 
/var/lib/minikube/certs/apiserver.key (1675 bytes) I0724 22:28:09.716006 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1103 bytes) I0724 22:28:09.736895 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes) I0724 22:28:09.758460 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1066 bytes) I0724 22:28:09.779106 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes) I0724 22:28:09.799617 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1074 bytes) I0724 22:28:09.820403 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes) I0724 22:28:09.842092 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1066 bytes) I0724 22:28:09.862786 706086 ssh_runner.go:215] scp /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/certs/14997.pem --> /usr/share/ca-certificates/14997.pem (1338 bytes) I0724 22:28:09.882954 706086 ssh_runner.go:215] scp memory --> /var/lib/minikube/kubeconfig (392 bytes) I0724 22:28:09.905395 706086 ssh_runner.go:148] Run: openssl version I0724 22:28:09.911241 706086 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0724 22:28:09.919765 706086 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0724 22:28:09.923475 706086 certs.go:389] hashing: -rw-r--r-- 1 root root 1066 Jul 24 21:47 /usr/share/ca-certificates/minikubeCA.pem I0724 22:28:09.923525 706086 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0724 22:28:09.929168 706086 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0724 22:28:09.936906 706086 ssh_runner.go:148] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14997.pem && ln -fs /usr/share/ca-certificates/14997.pem /etc/ssl/certs/14997.pem" I0724 22:28:09.945585 706086 ssh_runner.go:148] Run: ls -la /usr/share/ca-certificates/14997.pem I0724 22:28:09.949320 706086 certs.go:389] hashing: -rw-r--r-- 1 root root 1338 Jul 24 21:50 /usr/share/ca-certificates/14997.pem I0724 22:28:09.949371 706086 ssh_runner.go:148] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14997.pem I0724 22:28:09.955027 706086 ssh_runner.go:148] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14997.pem /etc/ssl/certs/51391683.0" I0724 22:28:09.962614 706086 kubeadm.go:327] StartCluster: {Name:containerd-20200724221200-14997 KeepContext:false EmbedCerts:false MinikubeISO: 
KicBaseImage:kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false DockerOpt:[containerd=/var/run/containerd/containerd.sock] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio KubernetesConfig:{KubernetesVersion:v1.18.3 ClusterName:containerd-20200724221200-14997 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8444 NodeName:} Nodes:[{Name: IP:172.17.0.3 Port:8444 KubernetesVersion:v1.18.3 ControlPlane:true Worker:true}] Addons:map[dashboard:true] VerifyComponents:map[apiserver:true apps_running:true default_sa:true system_pods:true]}
I0724 22:28:09.962673 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0724 22:28:09.962716 706086 ssh_runner.go:148] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0724 22:28:09.978238 706086 cri.go:76] found id: ""
I0724 22:28:09.978288 706086 ssh_runner.go:148] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0724 22:28:09.986366 706086 kubeadm.go:338] found existing configuration files, will attempt cluster restart
I0724 22:28:09.986384 706086 kubeadm.go:512] restartCluster start
I0724 22:28:09.986429 706086 ssh_runner.go:148] Run: sudo test -d /data/minikube
I0724 22:28:09.993476 706086 kubeadm.go:122] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:

stderr:
W0724 22:28:09.994960 706086 kubeadm.go:385] Overriding stale ClientConfig host https://172.17.0.5:8444 with https://172.17.0.3:8444
I0724 22:28:09.996690 706086 ssh_runner.go:148] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0724 22:28:10.005036 706086 kubeadm.go:480] needs reconfigure: configs differ:
-- stdout --
--- /var/tmp/minikube/kubeadm.yaml 2020-07-24 22:12:32.285097208 +0000
+++ /var/tmp/minikube/kubeadm.yaml.new 1901-12-13 20:45:52.000000000 +0000
@@ -1,7 +1,7 @@
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: InitConfiguration
 localAPIEndpoint:
-  advertiseAddress: 172.17.0.5
+  advertiseAddress: 172.17.0.3
   bindPort: 8444
 bootstrapTokens:
 - groups:
@@ -14,13 +14,13 @@
   criSocket: /run/containerd/containerd.sock
   name: "containerd-20200724221200-14997"
   kubeletExtraArgs:
-    node-ip: 172.17.0.5
+    node-ip: 172.17.0.3
   taints: []
 ---
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
 apiServer:
-  certSANs: ["127.0.0.1", "localhost", "172.17.0.5"]
+  certSANs: ["127.0.0.1", "localhost", "172.17.0.3"]
   extraArgs:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 certificatesDir: /var/lib/minikube/certs
@@ -62,4 +62,4 @@
 apiVersion: kubeproxy.config.k8s.io/v1alpha1
 kind: KubeProxyConfiguration
 clusterCIDR: "10.244.0.0/16"
-metricsBindAddress: 172.17.0.5:10249
+metricsBindAddress: 172.17.0.3:10249

-- /stdout --
I0724 22:28:10.005058 706086 kubeadm.go:913] stopping kube-system containers ...
I0724 22:28:10.005070 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name: Namespaces:[kube-system]}
I0724 22:28:10.005110 706086 ssh_runner.go:148] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0724 22:28:10.020645 706086 cri.go:76] found id: ""
I0724 22:28:10.020704 706086 ssh_runner.go:148] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0724 22:28:10.028877 706086 kubeadm.go:150] found existing configuration files:
-rw------- 1 root root 5495 Jul 24 22:12 /etc/kubernetes/admin.conf
-rw------- 1 root root 5527 Jul 24 22:12 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 2003 Jul 24 22:13 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5475 Jul 24 22:12 /etc/kubernetes/scheduler.conf
I0724 22:28:10.028925 706086 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf
I0724 22:28:10.036944 706086 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf
I0724 22:28:10.044800 706086 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf
I0724 22:28:10.052560 706086 ssh_runner.go:148] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf
I0724 22:28:10.060751 706086 ssh_runner.go:148] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0724 22:28:10.733681 706086 kubeadm.go:576] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
I0724 22:28:10.733704 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:28:10.794683 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:28:11.577800 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:28:11.898723 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:28:11.999603 706086 api_server.go:48] waiting for apiserver process to appear ...
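The reconfigure fires because the restarted container came back on 172.17.0.3 while the saved cluster state still said 172.17.0.5: the default docker bridge hands out addresses in container start order, so a stop/start cycle can move a kic node. A sketch comparing the live address with the saved profile (jq is an assumption here; any JSON reader works):

  docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' containerd-20200724221200-14997
  jq '.Nodes[0].IP' /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/profiles/containerd-20200724221200-14997/config.json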
I0724 22:28:11.999741 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:28:12.510401 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
...
I0724 22:29:11.010364 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:29:11.510526 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
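The half-second cadence above is worth calling out: minikube is simply polling `sudo pgrep` over SSH until a kube-apiserver process exists or a deadline passes. A minimal Go sketch of that pattern follows; `runSSH` is a hypothetical stand-in for the ssh_runner seen in the log, not a real minikube API.

package sketch

import (
	"context"
	"fmt"
	"strings"
	"time"
)

// pollAPIServerPID runs pgrep over SSH every 500ms until a kube-apiserver
// process shows up or the context deadline expires. runSSH is a hypothetical
// helper that executes a command on the node and returns its output.
func pollAPIServerPID(ctx context.Context, runSSH func(string) (string, error)) (string, error) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return "", fmt.Errorf("kube-apiserver process never appeared: %w", ctx.Err())
		case <-ticker.C:
			// pgrep exits non-zero while no matching process exists,
			// which is why the log shows one line per poll attempt.
			if out, err := runSSH("sudo pgrep -xnf kube-apiserver.*minikube.*"); err == nil {
				return strings.TrimSpace(out), nil
			}
		}
	}
}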
I0724 22:29:12.010322 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I0724 22:29:12.010414 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0724 22:29:12.059030 706086 cri.go:76] found id: ""
I0724 22:29:12.059067 706086 logs.go:203] 0 containers: []
W0724 22:29:12.059080 706086 logs.go:205] No container was found matching "kube-apiserver"
I0724 22:29:12.059092 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I0724 22:29:12.059191 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=etcd
I0724 22:29:12.081371 706086 cri.go:76] found id: "e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc"
I0724 22:29:12.081393 706086 cri.go:76] found id: ""
I0724 22:29:12.081400 706086 logs.go:203] 1 containers: [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc]
I0724 22:29:12.081449 706086 ssh_runner.go:148] Run: which crictl
I0724 22:29:12.085150 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I0724 22:29:12.085214 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=coredns
I0724 22:29:12.120726 706086 cri.go:76] found id: ""
I0724 22:29:12.120747 706086 logs.go:203] 0 containers: []
W0724 22:29:12.120754 706086 logs.go:205] No container was found matching "coredns"
I0724 22:29:12.120761 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I0724 22:29:12.120818 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0724 22:29:12.135827 706086 cri.go:76] found id: "574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503"
I0724 22:29:12.135848 706086 cri.go:76] found id: ""
I0724 22:29:12.135854 706086 logs.go:203] 1 containers: [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503]
I0724 22:29:12.135901 706086 ssh_runner.go:148] Run: which crictl
I0724 22:29:12.139612 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I0724 22:29:12.139672 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0724 22:29:12.155608 706086 cri.go:76] found id: ""
I0724 22:29:12.155630 706086 logs.go:203] 0 containers: []
W0724 22:29:12.155636 706086 logs.go:205] No container was found matching "kube-proxy"
I0724 22:29:12.155643 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kubernetes-dashboard Namespaces:[]}
I0724 22:29:12.155699 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=kubernetes-dashboard
I0724 22:29:12.172963 706086 cri.go:76] found id: ""
I0724 22:29:12.172985 706086 logs.go:203] 0 containers: []
W0724 22:29:12.172991 706086 logs.go:205] No container was found matching "kubernetes-dashboard"
I0724 22:29:12.172998 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I0724 22:29:12.173066 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I0724 22:29:12.188560 706086 cri.go:76] found id: ""
I0724 22:29:12.188581 706086 logs.go:203] 0 containers: []
W0724 22:29:12.188588 706086 logs.go:205] No container was found matching "storage-provisioner"
I0724 22:29:12.188595 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I0724 22:29:12.188658 706086 ssh_runner.go:148] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0724 22:29:12.206140 706086 cri.go:76] found id: "90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec"
I0724 22:29:12.206194 706086 cri.go:76] found id: "9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c"
I0724 22:29:12.206204 706086 cri.go:76] found id: ""
I0724 22:29:12.206212 706086 logs.go:203] 2 containers: [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec 9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c]
I0724 22:29:12.206285 706086 ssh_runner.go:148] Run: which crictl
I0724 22:29:12.210109 706086 ssh_runner.go:148] Run: which crictl
I0724 22:29:12.214284 706086 logs.go:117] Gathering logs for kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] ...
I0724 22:29:12.214311 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503"
I0724 22:29:12.260895 706086 logs.go:117] Gathering logs for kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] ...
I0724 22:29:12.260929 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec"
I0724 22:29:12.280488 706086 logs.go:117] Gathering logs for describe nodes ...
I0724 22:29:12.280517 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W0724 22:29:12.373176 706086 logs.go:124] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8444 was refused - did you specify the right host or port?
output: ** stderr **
The connection to the server localhost:8444 was refused - did you specify the right host or port?
** /stderr **
I0724 22:29:12.373200 706086 logs.go:117] Gathering logs for dmesg ...
I0724 22:29:12.373211 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0724 22:29:12.453100 706086 logs.go:117] Gathering logs for etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] ...
I0724 22:29:12.453137 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc"
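Each `listing CRI containers` / `found id` pair above is one crictl invocation per component name. A sketch of that enumeration, again over the hypothetical `runSSH` helper; crictl's --quiet mode prints one container ID per line, and an empty result is what the log renders as `0 containers: []`.

package sketch

import "strings"

// listCRIContainers mirrors the `sudo crictl ps -a --quiet --name=<name>`
// calls in the log and returns the container IDs that crictl reports.
func listCRIContainers(runSSH func(string) (string, error), name string) ([]string, error) {
	out, err := runSSH("sudo crictl ps -a --quiet --name=" + name)
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, id := range strings.Split(out, "\n") {
		if id = strings.TrimSpace(id); id != "" {
			ids = append(ids, id)
		}
	}
	return ids, nil
}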
I0724 22:29:12.472895 706086 logs.go:117] Gathering logs for kube-controller-manager [9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c] ...
I0724 22:29:12.472982 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c"
I0724 22:29:12.504168 706086 logs.go:117] Gathering logs for containerd ...
I0724 22:29:12.504197 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I0724 22:29:12.554158 706086 logs.go:117] Gathering logs for container status ...
I0724 22:29:12.554187 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0724 22:29:12.574610 706086 logs.go:117] Gathering logs for kubelet ...
I0724 22:29:12.574634 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0724 22:29:12.622052 706086 logs.go:132] Found kubelet problem: Jul 24 22:28:57 containerd-20200724221200-14997 kubelet[544]: E0724 22:28:57.401527 544 pod_workers.go:191] Error syncing pod fbf2e82d7389c2cc92ba5b8449d13b18 ("kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"
W0724 22:29:12.625667 706086 logs.go:132] Found kubelet problem: Jul 24 22:28:59 containerd-20200724221200-14997 kubelet[544]: E0724 22:28:59.610920 544 pod_workers.go:191] Error syncing pod fbf2e82d7389c2cc92ba5b8449d13b18 ("kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"
* Problems detected in kubelet:
I0724 22:29:22.654680 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:29:23.010357 706086 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
...
I0724 22:29:23.219465 706086 logs.go:203] 2 containers: [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec 9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c]
I0724 22:29:23.219532 706086 ssh_runner.go:148] Run: which crictl
I0724 22:29:23.223252 706086 ssh_runner.go:148] Run: which crictl
I0724 22:29:23.227288 706086 logs.go:117] Gathering logs for etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] ...
I0724 22:29:23.227307 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc"
I0724 22:29:23.246027 706086 logs.go:117] Gathering logs for kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] ...
I0724 22:29:23.246082 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503"
I0724 22:29:23.282553 706086 logs.go:117] Gathering logs for kube-controller-manager [9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c] ...
I0724 22:29:23.282584 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a4b0879feccb01895fcc020632085f8c18e2e3ad14407307cf92964f5e5e51c"
I0724 22:29:23.313131 706086 logs.go:117] Gathering logs for containerd ...
I0724 22:29:23.313159 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I0724 22:29:23.366874 706086 logs.go:117] Gathering logs for container status ...
I0724 22:29:23.366906 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0724 22:29:23.386849 706086 logs.go:117] Gathering logs for kubelet ...
I0724 22:29:23.386876 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0724 22:29:23.420319 706086 logs.go:132] Found kubelet problem: Jul 24 22:28:57 containerd-20200724221200-14997 kubelet[544]: E0724 22:28:57.401527 544 pod_workers.go:191] Error syncing pod fbf2e82d7389c2cc92ba5b8449d13b18 ("kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"
W0724 22:29:23.424257 706086 logs.go:132] Found kubelet problem: Jul 24 22:28:59 containerd-20200724221200-14997 kubelet[544]: E0724 22:28:59.610920 544 pod_workers.go:191] Error syncing pod fbf2e82d7389c2cc92ba5b8449d13b18 ("kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-containerd-20200724221200-14997_kube-system(fbf2e82d7389c2cc92ba5b8449d13b18)"
I0724 22:29:23.450759 706086 logs.go:117] Gathering logs for dmesg ...
I0724 22:29:23.450794 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0724 22:29:23.490258 706086 logs.go:117] Gathering logs for describe nodes ...
I0724 22:29:23.490286 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W0724 22:29:23.561780 706086 logs.go:124] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.18.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8444 was refused - did you specify the right host or port?
output: ** stderr **
The connection to the server localhost:8444 was refused - did you specify the right host or port?
** /stderr **
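The `Found kubelet problem` warnings are produced by scanning the journalctl output for known failure signatures such as CrashLoopBackOff. A rough sketch of that scan; the signature list here is illustrative, the real table lives in minikube's logs package (logs.go:132 above).

package sketch

import "strings"

// scanKubeletJournal flags journal lines that match known failure
// signatures, the way the warnings above are surfaced. The signatures
// below are examples only, not minikube's actual list.
func scanKubeletJournal(journal string) []string {
	signatures := []string{"CrashLoopBackOff", "Error syncing pod"}
	var problems []string
	for _, line := range strings.Split(journal, "\n") {
		for _, sig := range signatures {
			if strings.Contains(line, sig) {
				problems = append(problems, line)
				break
			}
		}
	}
	return problems
}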
I0724 22:29:23.561807 706086 logs.go:117] Gathering logs for kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] ...
I0724 22:29:23.561821 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec"
* Problems detected in kubelet:
I0724 22:29:33.593572 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:29:33.610590 706086 api_server.go:68] duration metric: took 1m21.610985859s to wait for apiserver process to appear ...
I0724 22:29:33.610612 706086 api_server.go:84] waiting for apiserver healthz status ...
I0724 22:29:33.610622 706086 api_server.go:221] Checking apiserver healthz at https://172.17.0.3:8444/healthz ...
I0724 22:29:33.615823 706086 api_server.go:241] https://172.17.0.3:8444/healthz returned 200: ok
I0724 22:29:33.623698 706086 api_server.go:137] control plane version: v1.18.3
I0724 22:29:33.623727 706086 api_server.go:127] duration metric: took 13.109313ms to wait for apiserver health ...
I0724 22:29:33.623735 706086 cni.go:74] Creating CNI manager for ""
I0724 22:29:33.623740 706086 cni.go:105] "docker" driver + containerd runtime found, recommending kindnet
I0724 22:29:33.695412 706086 ssh_runner.go:148] Run: stat /opt/cni/bin/portmap
I0724 22:29:33.700743 706086 cni.go:137] applying CNI manifest using /var/lib/minikube/binaries/v1.18.3/kubectl ...
I0724 22:29:33.700761 706086 ssh_runner.go:215] scp memory --> /var/tmp/minikube/cni.yaml (2285 bytes)
I0724 22:29:33.726710 706086 ssh_runner.go:148] Run: sudo /var/lib/minikube/binaries/v1.18.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0724 22:29:34.188176 706086 system_pods.go:43] waiting for kube-system pods to appear ...
I0724 22:29:34.198226 706086 system_pods.go:59] 8 kube-system pods found
I0724 22:29:34.198256 706086 system_pods.go:61] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:29:34.198264 706086 system_pods.go:61] "etcd-containerd-20200724221200-14997" [8610fe2f-3eeb-4715-bd02-2eb75b77541c] Running
I0724 22:29:34.198274 706086 system_pods.go:61] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:29:34.198283 706086 system_pods.go:61] "kube-apiserver-containerd-20200724221200-14997" [08b29940-e531-4528-ae07-4607aa6050e8] Running
I0724 22:29:34.198299 706086 system_pods.go:61] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running
I0724 22:29:34.198304 706086 system_pods.go:61] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running
I0724 22:29:34.198314 706086 system_pods.go:61] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running
I0724 22:29:34.198320 706086 system_pods.go:61] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:29:34.198325 706086 system_pods.go:74] duration metric: took 10.129806ms to wait for pod list to return data ...
I0724 22:29:34.198336 706086 node_conditions.go:101] verifying NodePressure condition ...
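The healthz wait that finally succeeds at 22:29:33 is a plain HTTPS GET against https://172.17.0.3:8444/healthz, repeated until it returns 200. A minimal sketch, assuming certificate verification can be skipped because the host does not yet trust the cluster CA at this stage:

package sketch

import (
	"context"
	"crypto/tls"
	"net/http"
	"time"
)

// apiServerHealthy reports whether GET <endpoint>/healthz returns 200,
// matching the `returned 200: ok` line above.
func apiServerHealthy(ctx context.Context, endpoint string) bool {
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			// The apiserver's serving cert is not trusted by the host here.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint+"/healthz", nil)
	if err != nil {
		return false
	}
	resp, err := client.Do(req)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}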
I0724 22:29:34.201615 706086 node_conditions.go:121] node storage ephemeral capacity is 128884272Ki
I0724 22:29:34.201643 706086 node_conditions.go:122] node cpu capacity is 16
I0724 22:29:34.201654 706086 node_conditions.go:104] duration metric: took 3.312931ms to run NodePressure ...
I0724 22:29:34.201668 706086 ssh_runner.go:148] Run: /bin/bash -c "sudo env PATH=/var/lib/minikube/binaries/v1.18.3:$PATH kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I0724 22:29:34.795740 706086 ssh_runner.go:148] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0724 22:29:34.804209 706086 ops.go:35] apiserver oom_adj: -16
I0724 22:29:34.804226 706086 kubeadm.go:516] restartCluster took 1m24.81783701s
I0724 22:29:34.804234 706086 kubeadm.go:329] StartCluster complete in 1m24.841624768s
I0724 22:29:34.804249 706086 settings.go:123] acquiring lock: {Name:mk120aead41f4abf9b6da50636235ecd4ae2a41a Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0724 22:29:34.804382 706086 settings.go:131] Updating kubeconfig: /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig
I0724 22:29:34.805564 706086 lock.go:35] WriteFile acquiring /home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/kubeconfig: {Name:mk94f19b810ab6208411eb086ed6241d89a90d8c Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0724 22:29:34.805741 706086 start.go:195] Will wait wait-timeout for node ...
I0724 22:29:34.805786 706086 addons.go:353] enableAddons start: toEnable=map[dashboard:true], additional=[]
I0724 22:29:34.854039 706086 addons.go:53] Setting storage-provisioner=true in profile "containerd-20200724221200-14997"
I0724 22:29:34.854045 706086 api_server.go:48] waiting for apiserver process to appear ...
I0724 22:29:34.805896 706086 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl scale deployment --replicas=1 coredns -n=kube-system
I0724 22:29:34.854173 706086 ssh_runner.go:148] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0724 22:29:34.854209 706086 addons.go:53] Setting default-storageclass=true in profile "containerd-20200724221200-14997"
I0724 22:29:34.854235 706086 addons.go:267] enableOrDisableStorageClasses default-storageclass=true on "containerd-20200724221200-14997"
I0724 22:29:34.854162 706086 addons.go:53] Setting dashboard=true in profile "containerd-20200724221200-14997"
I0724 22:29:34.854394 706086 addons.go:129] Setting addon dashboard=true in "containerd-20200724221200-14997"
W0724 22:29:34.854405 706086 addons.go:138] addon dashboard should already be in state true
I0724 22:29:34.854434 706086 host.go:65] Checking if "containerd-20200724221200-14997" exists ...
I0724 22:29:34.854201 706086 addons.go:129] Setting addon storage-provisioner=true in "containerd-20200724221200-14997"
W0724 22:29:34.854541 706086 addons.go:138] addon storage-provisioner should already be in state true
I0724 22:29:34.854573 706086 host.go:65] Checking if "containerd-20200724221200-14997" exists ...
I0724 22:29:34.855014 706086 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}}
I0724 22:29:34.855088 706086 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}}
I0724 22:29:34.855129 706086 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}}
I0724 22:29:34.909928 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-ns.yaml
I0724 22:29:34.909977 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I0724 22:29:34.910075 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997
I0724 22:29:34.917499 706086 addons.go:236] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0724 22:29:34.917521 706086 ssh_runner.go:215] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2668 bytes)
I0724 22:29:34.917584 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997
I0724 22:29:34.931544 706086 addons.go:129] Setting addon default-storageclass=true in "containerd-20200724221200-14997"
W0724 22:29:34.931567 706086 addons.go:138] addon default-storageclass should already be in state true
I0724 22:29:34.931581 706086 host.go:65] Checking if "containerd-20200724221200-14997" exists ...
I0724 22:29:34.932004 706086 cli_runner.go:109] Run: docker container inspect containerd-20200724221200-14997 --format={{.State.Status}}
I0724 22:29:34.968085 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker}
I0724 22:29:34.970781 706086 api_server.go:68] duration metric: took 165.010506ms to wait for apiserver process to appear ...
I0724 22:29:34.970802 706086 api_server.go:84] waiting for apiserver healthz status ...
I0724 22:29:34.970810 706086 api_server.go:221] Checking apiserver healthz at https://172.17.0.3:8444/healthz ...
I0724 22:29:34.970898 706086 start.go:549] successfully scaled coredns replicas to 1
I0724 22:29:34.975669 706086 api_server.go:241] https://172.17.0.3:8444/healthz returned 200: ok
I0724 22:29:34.976496 706086 api_server.go:137] control plane version: v1.18.3
I0724 22:29:34.976515 706086 api_server.go:127] duration metric: took 5.708198ms to wait for apiserver health ...
I0724 22:29:34.976523 706086 system_pods.go:43] waiting for kube-system pods to appear ...
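The cli_runner lines above resolve which host port Docker mapped to the node container's 22/tcp; the sshutil lines then dial 127.0.0.1 on that port (32920 here). A sketch using the same inspect template that appears in the log:

package sketch

import (
	"os/exec"
	"strings"
)

// sshHostPort asks Docker which host port is bound to the container's
// 22/tcp, using the inspect -f template seen in the cli_runner lines.
func sshHostPort(container string) (string, error) {
	out, err := exec.Command("docker", "container", "inspect", "-f",
		`{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`,
		container).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}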
I0724 22:29:34.979581 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker}
I0724 22:29:34.981425 706086 system_pods.go:59] 8 kube-system pods found
...
I0724 22:29:34.981531 706086 system_pods.go:74] duration metric: took 5.002849ms to wait for pod list to return data ...
I0724 22:29:34.981538 706086 default_sa.go:33] waiting for default service account to be created ...
I0724 22:29:34.987127 706086 default_sa.go:44] found service account: "default"
I0724 22:29:34.987151 706086 default_sa.go:54] duration metric: took 5.606991ms for default service account to be created ...
I0724 22:29:34.987159 706086 system_pods.go:116] waiting for k8s-apps to be running ...
I0724 22:29:34.990350 706086 addons.go:236] installing /etc/kubernetes/addons/storageclass.yaml
I0724 22:29:34.990368 706086 ssh_runner.go:215] scp deploy/addons/storageclass/storageclass.yaml.tmpl --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0724 22:29:34.990419 706086 cli_runner.go:109] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" containerd-20200724221200-14997
I0724 22:29:34.991952 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:34.992055 706086 retry.go:30] will retry after 263.082536ms: missing components: kube-dns
I0724 22:29:35.045850 706086 sshutil.go:44] new ssh client: &{IP:127.0.0.1 Port:32920 SSHKeyPath:/home/jenkins/actions-runner/_work/minikube/minikube/out/testhome/.minikube/machines/containerd-20200724221200-14997/id_rsa Username:docker}
I0724 22:29:35.070449 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I0724 22:29:35.070471 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I0724 22:29:35.087089 706086 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0724 22:29:35.093051 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I0724 22:29:35.093069 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I0724 22:29:35.119450 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I0724 22:29:35.119479 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I0724 22:29:35.154612 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-dp.yaml
I0724 22:29:35.154635 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-dp.yaml --> /etc/kubernetes/addons/dashboard-dp.yaml (4097 bytes)
I0724 22:29:35.157161 706086 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0724 22:29:35.238967 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-role.yaml
I0724 22:29:35.238997 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I0724 22:29:35.266999 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I0724 22:29:35.267036 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I0724 22:29:35.289022 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:35.289139 706086 retry.go:30] will retry after 381.329545ms: missing components: kube-dns
I0724 22:29:35.336430 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-sa.yaml
I0724 22:29:35.336458 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I0724 22:29:35.363623 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-secret.yaml
I0724 22:29:35.363644 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1401 bytes)
I0724 22:29:35.436935 706086 addons.go:236] installing /etc/kubernetes/addons/dashboard-svc.yaml
I0724 22:29:35.436968 706086 ssh_runner.go:215] scp deploy/addons/dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I0724 22:29:35.463152 706086 ssh_runner.go:148] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.18.3/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0724 22:29:35.687716 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:35.687876 706086 retry.go:30] will retry after 422.765636ms: missing components: kube-dns
I0724 22:29:35.902877 706086 addons.go:355] enableAddons completed in 1.097093502s
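Every addon above follows the same two steps: scp the manifest into /etc/kubernetes/addons on the node, then apply the whole batch with the cluster's own kubectl binary and kubeconfig. A sketch of the batched apply, mirroring the command shape in the log, with `runSSH` again a hypothetical helper:

package sketch

import "strings"

// applyAddons batches every copied manifest into one `kubectl apply`,
// matching the log's command: the node-local kubectl that fits the
// cluster version, the in-cluster kubeconfig, and one -f flag per file.
func applyAddons(runSSH func(string) (string, error), files []string) (string, error) {
	cmd := []string{
		"sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig",
		"/var/lib/minikube/binaries/v1.18.3/kubectl", "apply",
	}
	for _, f := range files {
		cmd = append(cmd, "-f", f)
	}
	return runSSH(strings.Join(cmd, " "))
}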
I0724 22:29:36.124718 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:36.124864 706086 retry.go:30] will retry after 473.074753ms: missing components: kube-dns
I0724 22:29:36.604419 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:36.604612 706086 retry.go:30] will retry after 587.352751ms: missing components: kube-dns
I0724 22:29:37.197433 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:37.197528 706086 retry.go:30] will retry after 834.206799ms: missing components: kube-dns
I0724 22:29:38.038138 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:38.038281 706086 retry.go:30] will retry after 746.553905ms: missing components: kube-dns
I0724 22:29:38.790316 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:38.790404 706086 retry.go:30] will retry after 987.362415ms: missing components: kube-dns
I0724 22:29:39.783156 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:39.783250 706086 retry.go:30] will retry after 1.189835008s: missing components: kube-dns
I0724 22:29:40.978274 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:40.978362 706086 retry.go:30] will retry after 1.677229867s: missing components: kube-dns
I0724 22:29:42.662521 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:42.662615 706086 retry.go:30] will retry after 2.346016261s: missing components: kube-dns
I0724 22:29:45.013848 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:45.013942 706086 retry.go:30] will retry after 3.36678925s: missing components: kube-dns
I0724 22:29:48.386413 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:48.386556 706086 retry.go:30] will retry after 3.11822781s: missing components: kube-dns
I0724 22:29:51.510070 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:51.510169 706086 retry.go:30] will retry after 4.276119362s: missing components: kube-dns
I0724 22:29:55.791322 706086 system_pods.go:86] 8 kube-system pods found
...
I0724 22:29:55.791428 706086 retry.go:30] will retry after 5.167232101s: missing components: kube-dns
I0724 22:30:00.963250 706086 system_pods.go:86] 6 kube-system pods found
I0724 22:30:00.963284 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:30:00.963293 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:30:00.963303 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0724 22:30:00.963312 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running
I0724 22:30:00.963320 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running
I0724 22:30:00.963352 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0724 22:30:00.963363 706086 retry.go:30] will retry after 6.994901864s: missing components: kube-dns, etcd, kube-apiserver
I0724 22:30:07.962666 706086 system_pods.go:86] 6 kube-system pods found
I0724 22:30:07.962698 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0724 22:30:07.962707 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0724 22:30:07.962720 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager]) I0724 22:30:07.962729 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:30:07.962744 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:30:07.962750 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:30:07.962770 706086 retry.go:30] will retry after 7.91826225s: missing components: kube-dns, etcd, kube-apiserver I0724 22:30:15.885338 706086 system_pods.go:86] 6 kube-system pods found I0724 22:30:15.885372 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:30:15.885380 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:30:15.885388 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:30:15.885395 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:30:15.885401 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:30:15.885408 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:30:15.885419 706086 retry.go:30] will retry after 9.953714808s: missing components: kube-dns, etcd, kube-apiserver I0724 22:30:25.845191 706086 system_pods.go:86] 6 kube-system pods found I0724 22:30:25.845223 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:30:25.845234 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:30:25.845242 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:30:25.845248 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:30:25.845253 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:30:25.845260 706086 system_pods.go:89] 
"storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:30:25.845277 706086 retry.go:30] will retry after 15.120437328s: missing components: kube-dns, etcd, kube-apiserver I0724 22:30:40.973542 706086 system_pods.go:86] 6 kube-system pods found I0724 22:30:40.973590 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:30:40.973603 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:30:40.973617 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:30:40.973626 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:30:40.973635 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:30:40.973646 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:30:40.973662 706086 retry.go:30] will retry after 14.90607158s: missing components: kube-dns, etcd, kube-apiserver I0724 22:30:55.884529 706086 system_pods.go:86] 6 kube-system pods found I0724 22:30:55.884561 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:30:55.884568 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:30:55.884576 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:30:55.884581 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:30:55.884587 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:30:55.884594 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:30:55.884613 706086 retry.go:30] will retry after 18.465989061s: missing components: kube-dns, etcd, kube-apiserver I0724 22:31:14.355738 706086 system_pods.go:86] 7 kube-system pods found I0724 22:31:14.355770 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:31:14.355777 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" 
[0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:31:14.355786 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:31:14.355795 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:31:14.355803 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:31:14.355809 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:31:14.355816 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:31:14.355834 706086 retry.go:30] will retry after 25.219510332s: missing components: kube-dns, kube-apiserver I0724 22:31:39.581090 706086 system_pods.go:86] 8 kube-system pods found I0724 22:31:39.581124 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:31:39.581131 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" [0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:31:39.581140 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:31:39.581147 706086 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [b0b81410-9ba7-4802-b380-bd3a916151e7] Running I0724 22:31:39.581153 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:31:39.581158 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:31:39.581164 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:31:39.581171 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:31:39.581182 706086 retry.go:30] will retry after 35.078569648s: missing components: kube-dns I0724 22:32:14.665004 706086 system_pods.go:86] 8 kube-system pods found I0724 22:32:14.665037 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:32:14.665042 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" [0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:32:14.665048 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running I0724 22:32:14.665053 706086 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" 
[b0b81410-9ba7-4802-b380-bd3a916151e7] Running I0724 22:32:14.665058 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:32:14.665063 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:32:14.665067 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:32:14.665073 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:32:14.665084 706086 retry.go:30] will retry after 50.027701973s: missing components: kube-dns I0724 22:33:04.699878 706086 system_pods.go:86] 8 kube-system pods found I0724 22:33:04.699932 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:33:04.699944 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" [0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:33:04.699957 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:33:04.699971 706086 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [b0b81410-9ba7-4802-b380-bd3a916151e7] Running I0724 22:33:04.699981 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:33:04.699989 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:33:04.699999 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:33:04.700010 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:33:04.700024 706086 retry.go:30] will retry after 47.463338706s: missing components: kube-dns I0724 22:33:52.170722 706086 system_pods.go:86] 8 kube-system pods found I0724 22:33:52.170754 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:33:52.170761 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" [0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:33:52.170769 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:33:52.170776 706086 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [b0b81410-9ba7-4802-b380-bd3a916151e7] Running I0724 22:33:52.170782 706086 system_pods.go:89] 
"kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:33:52.170806 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:33:52.170812 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:33:52.170818 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:33:52.170828 706086 retry.go:30] will retry after 53.912476906s: missing components: kube-dns I0724 22:34:46.091240 706086 system_pods.go:86] 8 kube-system pods found I0724 22:34:46.091292 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:34:46.091312 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" [0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:34:46.091340 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:34:46.091398 706086 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [b0b81410-9ba7-4802-b380-bd3a916151e7] Running I0724 22:34:46.091422 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:34:46.091434 706086 system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:34:46.091456 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:34:46.091472 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:34:46.091498 706086 retry.go:30] will retry after 1m7.577191067s: missing components: kube-dns I0724 22:35:53.674785 706086 system_pods.go:86] 8 kube-system pods found I0724 22:35:53.674836 706086 system_pods.go:89] "coredns-66bff467f8-hlk9j" [584286ea-2ddf-4194-8a3c-48d505f7f607] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns]) I0724 22:35:53.674847 706086 system_pods.go:89] "etcd-containerd-20200724221200-14997" [0e18d20d-20a7-4dfa-84e2-ae20c22af0c0] Running I0724 22:35:53.674862 706086 system_pods.go:89] "kindnet-nsc8k" [562af1c8-c195-4fed-b475-4ff2a3cca8b5] Running / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni]) I0724 22:35:53.674881 706086 system_pods.go:89] "kube-apiserver-containerd-20200724221200-14997" [b0b81410-9ba7-4802-b380-bd3a916151e7] Running I0724 22:35:53.674892 706086 system_pods.go:89] "kube-controller-manager-containerd-20200724221200-14997" [6460d851-bac6-4fd9-977f-c71ff25d78cd] Running I0724 22:35:53.674907 706086 
system_pods.go:89] "kube-proxy-x7fwq" [4a6282eb-2be4-496a-890f-bf90dab64aa5] Running I0724 22:35:53.674916 706086 system_pods.go:89] "kube-scheduler-containerd-20200724221200-14997" [0784d162-3188-4cef-947b-950b5f11830c] Running I0724 22:35:53.674927 706086 system_pods.go:89] "storage-provisioner" [25727a03-b071-4a2c-8dfb-ea2a7038e4cd] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner]) I0724 22:35:53.675070 706086 exit.go:58] WithError(failed to start node)=startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns called from: goroutine 1 [running]: runtime/debug.Stack(0x0, 0x0, 0x100000000000000) /home/jenkins/actions-runner/_work/_tool/go/1.14.6/x64/src/runtime/debug/stack.go:24 +0x9d k8s.io/minikube/pkg/minikube/exit.WithError(0x1ba7c56, 0x14, 0x1ebf200, 0xc000039040) /home/jenkins/actions-runner/_work/minikube/minikube/pkg/minikube/exit/exit.go:58 +0x34 k8s.io/minikube/cmd/minikube/cmd.runStart(0x2cd0820, 0xc0005ce2a0, 0x2, 0xe) /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/start.go:206 +0x505 github.com/spf13/cobra.(*Command).execute(0x2cd0820, 0xc0005ce1c0, 0xe, 0xe, 0x2cd0820, 0xc0005ce1c0) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:846 +0x29d github.com/spf13/cobra.(*Command).ExecuteC(0x2ccf860, 0x0, 0x1, 0xc0002605f0) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:950 +0x349 github.com/spf13/cobra.(*Command).Execute(...) /home/jenkins/go/pkg/mod/github.com/spf13/cobra@v1.0.0/command.go:887 k8s.io/minikube/cmd/minikube/cmd.Execute() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/cmd/root.go:106 +0x72c main.main() /home/jenkins/actions-runner/_work/minikube/minikube/cmd/minikube/main.go:71 +0x11f W0724 22:35:53.675204 706086 out.go:249] failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * X failed to start node: startup failed: wait 6m0s for node: waiting for apps_running: expected k8s-apps: missing components: kube-dns * * minikube is exiting due to an error. If the above message is not useful, open an issue: - https://github.com/kubernetes/minikube/issues/new/choose ** /stderr ** start_stop_delete_test.go:193: failed to start minikube post-stop. 
args "./minikube-linux-amd64 start -p containerd-20200724221200-14997 --memory=2200 --alsologtostderr --wait=true --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --apiserver-port=8444 --vm-driver=docker --base-image=local/kicbase:-snapshot --kubernetes-version=v1.18.3": exit status 70 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/SecondStart]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 
0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", 
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/SecondStart FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/SecondStart]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/SecondStart logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * bbfb4ea37ab48 2186a1a396deb 55 seconds ago Exited kindnet-cni 5 c2246648589e4 * c9bef67c4e66e 4689081edb103 3 minutes ago Exited storage-provisioner 5 0418a87b40342 * 8662707b3b0f7 da26705ccb4b5 5 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 5 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 6 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 6 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 7 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 7 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:35:54 UTC. 
-- * Jul 24 22:35:19 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:19.312674181Z" level=info msg="TaskExit event &TaskExit{ContainerID:bbfb4ea37ab4890279b72f6e3828e6f9ec01958f33500ffde383f9f8d6a82da0,ID:bbfb4ea37ab4890279b72f6e3828e6f9ec01958f33500ffde383f9f8d6a82da0,Pid:6099,ExitStatus:2,ExitedAt:2020-07-24 22:35:19.312302655 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:35:19 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:19.338288913Z" level=info msg="shim reaped" id=bbfb4ea37ab4890279b72f6e3828e6f9ec01958f33500ffde383f9f8d6a82da0 * Jul 24 22:35:20 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:20.075582990Z" level=info msg="RemoveContainer for \"8398d86f6acce72e252af4294f7136c0144cb5d47fabd7581b57bf472c2d96af\"" * Jul 24 22:35:20 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:20.106347771Z" level=info msg="RemoveContainer for \"8398d86f6acce72e252af4294f7136c0144cb5d47fabd7581b57bf472c2d96af\" returns successfully" * Jul 24 22:35:20 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:20.338628184Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:35:21 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:21.583789316Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"b93a96075426a8a6a35f895e321d487e01f61452e328a861807f614b35e5c43b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:25 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:25.338806711Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:35:27 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:27.306719928Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"9fbfcc5e8a553be73ab4e7bb4f838c7de5f329cf84bc04600163be2e59c493f8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:28 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:28.338802925Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:35:30 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:30.222220355Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"05d105454cb481e57aaf46cc68d4bcb074322fd911b2ba3df4ef97e7cb7aea8c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:32 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:32.338874035Z" level=info msg="RunPodsandbox for 
&PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:35:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:34.303976636Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"5afcabe4c1c67ebc52122145d184165eef681a08b7174ad6cd6e540ae7364125\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:35.338620326Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:35:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:37.410305666Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"b1a4fa4b59b7d63fcaefe2dadddbb208a1ce3ab2b7dac0fc66a984b62077500a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:38 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:38.338835002Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:35:40 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:40.036464983Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"31c50c7117aa1c685c9a8ebf40791afc8bee54f169e2b81974fe5515237db06c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:45 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:45.338965734Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:35:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:47.340864091Z" level=info msg="CreateContainer within sandbox \"0418a87b4034227ca6f69790e2a456cf1f08e80d41f15f957c45381453e4f4b9\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:6,}" * Jul 24 22:35:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:47.357757797Z" level=error msg="CreateContainer within sandbox \"0418a87b4034227ca6f69790e2a456cf1f08e80d41f15f957c45381453e4f4b9\" for &ContainerMetadata{Name:storage-provisioner,Attempt:6,} failed" error="failed to create containerd container: failed to rename: rename /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/new-649630069 /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/57: file exists" * Jul 24 22:35:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:47.403403582Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox 
\"1eca3a5d8d03fc07233f3501b3058b479bc2f91308e4e857cc507d08988372fb\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:48.338710064Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:35:49 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:49.339533031Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:35:50 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:50.189815950Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"3a4075227dfaa8b776aa82041691e4d056b8b6bbfaae3b60ebc0191ad5448354\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:51 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:51.488504186Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"2f2bfc31dcd4e7ef42bc5e67248a5378d7efaf7c6743612da23a239534f5bba5\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:35:54.338553701Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:35:53 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:34:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:34:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:34:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:34:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * 
InternalIP: 172.17.0.3 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 5ea7312d3bbd4189a79e31122cb237a6 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 16m * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 22m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m48s * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 22m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 4m36s * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m29s * kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m29s * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 23m (x6 over 23m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 23m (x5 over 23m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 23m (x6 over 23m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal Starting 22m kubelet, containerd-20200724221200-14997 Starting kubelet. 
* Warning SystemOOM 22m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 22m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 22m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 22m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 22m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 22m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 22m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Normal Starting 22m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * Warning readOnlySysFS 22m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 7m45s kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 7m45s kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 7m45s kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeAllocatableEnforced 7m45s kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeHasSufficientMemory 7m39s (x7 over 7m45s) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 7m39s (x7 over 7m45s) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 7m39s (x7 over 7m45s) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 5m54s kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 5m54s kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
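Read against the containerd log above, the node description narrows the fault: the node itself is Ready and has PodCIDR 10.244.0.0/24 assigned, the SystemOOM and readOnlySysFS events are warnings from the earlier kubelet and kube-proxy restarts, yet every pod that needs a CNI sandbox (coredns, busybox, both dashboard pods) is stuck because each RunPodSandbox attempt fails with: failed to set bridge addr: could not add IP address to "cni0": permission denied. On the API side such sandbox failures typically surface as Warning events (reason FailedCreatePodSandBox) on the affected pods. A minimal client-go sketch for listing those warnings, again illustrative only and assuming the default kubeconfig:

package main

import (
	"context"
	"fmt"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("",
		filepath.Join(homedir.HomeDir(), ".kube", "config"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Roughly `kubectl get events -n kube-system --field-selector type=Warning`:
	// sandbox setup failures like the cni0 one above usually show up here.
	events, err := client.CoreV1().Events("kube-system").List(context.TODO(),
		metav1.ListOptions{FieldSelector: "type=Warning"})
	if err != nil {
		panic(err)
	}
	for _, e := range events.Items {
		fmt.Printf("%s %s/%s: %s\n",
			e.Reason, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Message)
	}
}

The storage-provisioner CreateContainer error in the same containerd log (rename ... file exists in the overlayfs snapshotter) is a separate containerd issue, but the cni0 permission failure alone accounts for the missing kube-dns component that failed this test.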
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <== * raft2020/07/24 22:28:24 INFO: 952f31ff200093ba is starting a new election at term 2 * raft2020/07/24 22:28:24 INFO: 952f31ff200093ba became candidate at term 3 * raft2020/07/24 22:28:24 INFO: 952f31ff200093ba received MsgVoteResp from 952f31ff200093ba at term 3 * raft2020/07/24 22:28:24 INFO: 952f31ff200093ba became leader at term 3 * raft2020/07/24 22:28:24 INFO: raft.node: 952f31ff200093ba elected leader 952f31ff200093ba at term 3 * 2020-07-24 22:28:24.327120 I | etcdserver: published {Name:containerd-20200724221200-14997 ClientURLs:[https://172.17.0.3:2379]} to cluster 5af0857ece1ce0e5 * 2020-07-24 22:28:24.327146 I | embed: ready to serve client requests * 2020-07-24 22:28:24.327181 I | embed: ready to serve client requests * 2020-07-24 22:28:24.328918 I | embed: serving client requests on 127.0.0.1:2379 * 2020-07-24 22:28:24.328956 I | embed: serving client requests on 172.17.0.3:2379 * 2020-07-24 22:29:30.036095 W | etcdserver: read-only range request "key:\"/registry/ranges/serviceips\" " with result "range_response_count:1 size:118" took too long (100.036069ms) to execute * 2020-07-24 22:29:30.036950 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (100.736017ms) to execute * 2020-07-24 22:29:30.036995 W | etcdserver: read-only range request "key:\"/registry/csinodes/containerd-20200724221200-14997\" " with result "range_response_count:1 size:521" took too long (100.33589ms) to execute * 2020-07-24 22:29:30.037878 W | etcdserver: read-only range request "key:\"/registry/ranges/servicenodeports\" " with result "range_response_count:1 size:120" took too long (101.214251ms) to execute * 2020-07-24 
22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute * 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute * 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute * 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s * 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute * 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute * 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute * 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute * 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s * 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute * 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute * * ==> kernel <== * 22:35:54 up 1:03, 0 users, load average: 0.89, 3.97, 6.35 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <== * I0724 22:29:29.935607 1 cache.go:39] Caches are synced for autoregister controller * I0724 22:29:29.935618 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller * I0724 22:29:29.935640 1 shared_informer.go:230] Caches are synced for cluster_authentication_trust_controller * I0724 22:29:29.936515 1 shared_informer.go:230] Caches are synced for crd-autoregister * I0724 22:29:30.833239 1 controller.go:130] OpenAPI AggregationController: action for item : Nothing (removed from the queue). * I0724 22:29:30.833268 1 controller.go:130] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue). * I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist. 
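The etcd warnings above ("wal: sync duration of 4.321780012s, expected less than 1s", plus the multi-second read-only range requests) usually point at slow fsync on the disk backing the data directory. A rough latency probe, assumed tooling rather than anything minikube ships, that times write+fsync cycles on a directory of your choosing:

```go
// fsync_probe.go: assumed diagnostic sketch, not from this log. etcd warns
// when a WAL fsync exceeds 1s; this times small write+fsync rounds on the
// same filesystem to see whether the disk itself is the bottleneck.
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	dir := "." // ideally a dir on the etcd data filesystem, e.g. under /var
	if len(os.Args) > 1 {
		dir = os.Args[1]
	}
	f, err := os.CreateTemp(dir, "fsync-probe-*")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	buf := make([]byte, 8*1024) // roughly a WAL entry's worth of data
	var worst time.Duration
	for i := 0; i < 50; i++ {
		start := time.Now()
		if _, err := f.Write(buf); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		if err := f.Sync(); err != nil { // fsync, the operation etcd times
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		if d := time.Since(start); d > worst {
			worst = d
		}
	}
	fmt.Printf("worst write+fsync over 50 rounds: %v (etcd warns above 1s)\n", worst)
}
```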
* W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3] * I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints * I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io * I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io * I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts * I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io * I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io * I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s): * Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed * I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s): * Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database * I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s): * Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database * I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s): * Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response * * ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <== * I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd * I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg * W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist * I0724 22:30:25.167526 1 
shared_informer.go:230] Caches are synced for disruption * I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server. * I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL * I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node * I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator * I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator * I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator * I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC * I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint * I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513029 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * * ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <== * I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * * ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <== * I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3 * I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier. 
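The kube-controller-manager instance above died with "failed to wait for apiserver being healthy" after repeated connection-refused dials to port 8444, and the scheduler below shows the same refusals. That wait is essentially a /healthz poll with a deadline; the endpoint, timeouts, and skipped TLS verification in this sketch are assumptions to keep it self-contained, not values lifted from the components:

```go
// healthz_wait.go: illustrative sketch of a "wait for apiserver healthy"
// loop. The real components verify the cluster CA; we skip verification
// here only so the example runs without certificates.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	url := "https://control-plane.minikube.internal:8444/healthz" // per this log
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err != nil {
			// e.g. "dial tcp 172.17.0.3:8444: connect: connection refused"
			fmt.Println("healthz not reachable yet:", err)
		} else {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("apiserver is healthy")
				return
			}
			fmt.Println("healthz status:", resp.Status)
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Fprintln(os.Stderr, "timed out waiting for apiserver /healthz")
	os.Exit(1)
}
```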
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3 * I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:30:00.460979 1 config.go:315] Starting service config controller * I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller * I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config * I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <== * E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:55.798147 1 reflector.go:178] 
k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 
22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:35:55 UTC. -- * Jul 24 22:35:37 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:37.410620 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "b1a4fa4b59b7d63fcaefe2dadddbb208a1ce3ab2b7dac0fc66a984b62077500a": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:37 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:37.410636 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "b1a4fa4b59b7d63fcaefe2dadddbb208a1ce3ab2b7dac0fc66a984b62077500a": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:37 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:37.410690 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"b1a4fa4b59b7d63fcaefe2dadddbb208a1ce3ab2b7dac0fc66a984b62077500a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:40 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:40.036792 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "31c50c7117aa1c685c9a8ebf40791afc8bee54f169e2b81974fe5515237db06c": failed to set bridge 
addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:40 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:40.036851 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "31c50c7117aa1c685c9a8ebf40791afc8bee54f169e2b81974fe5515237db06c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:40 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:40.036869 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "31c50c7117aa1c685c9a8ebf40791afc8bee54f169e2b81974fe5515237db06c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:40 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:40.036921 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"31c50c7117aa1c685c9a8ebf40791afc8bee54f169e2b81974fe5515237db06c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:46 containerd-20200724221200-14997 kubelet[544]: I0724 22:35:46.338472 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: bbfb4ea37ab4890279b72f6e3828e6f9ec01958f33500ffde383f9f8d6a82da0 * Jul 24 22:35:46 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:46.339048 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: I0724 22:35:47.338465 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: c9bef67c4e66ebc2e78f9fac9234f7745bc2699cdae7af2b7b8db7eed14f8ab8 * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.358032 544 remote_runtime.go:200] CreateContainer in sandbox "0418a87b4034227ca6f69790e2a456cf1f08e80d41f15f957c45381453e4f4b9" from runtime service failed: rpc error: code = Unknown desc = failed to create containerd container: failed to rename: rename /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/new-649630069 /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/57: file exists * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.358100 544 kuberuntime_manager.go:801] container start failed: CreateContainerError: failed to create containerd container: failed to rename: rename /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/new-649630069 /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/57: file exists * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.358137 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd 
("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CreateContainerError: "failed to create containerd container: failed to rename: rename /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/new-649630069 /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/57: file exists" * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.403683 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1eca3a5d8d03fc07233f3501b3058b479bc2f91308e4e857cc507d08988372fb": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.403767 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1eca3a5d8d03fc07233f3501b3058b479bc2f91308e4e857cc507d08988372fb": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.403793 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "1eca3a5d8d03fc07233f3501b3058b479bc2f91308e4e857cc507d08988372fb": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:47.403866 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"1eca3a5d8d03fc07233f3501b3058b479bc2f91308e4e857cc507d08988372fb\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:50.190157 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "3a4075227dfaa8b776aa82041691e4d056b8b6bbfaae3b60ebc0191ad5448354": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:50.190218 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "3a4075227dfaa8b776aa82041691e4d056b8b6bbfaae3b60ebc0191ad5448354": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:50.190234 544 kuberuntime_manager.go:727] createPodSandbox for pod 
"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "3a4075227dfaa8b776aa82041691e4d056b8b6bbfaae3b60ebc0191ad5448354": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:50.190292 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"3a4075227dfaa8b776aa82041691e4d056b8b6bbfaae3b60ebc0191ad5448354\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:35:51 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:51.488812 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2f2bfc31dcd4e7ef42bc5e67248a5378d7efaf7c6743612da23a239534f5bba5": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:51 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:51.488866 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2f2bfc31dcd4e7ef42bc5e67248a5378d7efaf7c6743612da23a239534f5bba5": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:51 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:51.488884 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2f2bfc31dcd4e7ef42bc5e67248a5378d7efaf7c6743612da23a239534f5bba5": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:35:51 containerd-20200724221200-14997 kubelet[544]: E0724 22:35:51.488938 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"2f2bfc31dcd4e7ef42bc5e67248a5378d7efaf7c6743612da23a239534f5bba5\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [c9bef67c4e66ebc2e78f9fac9234f7745bc2699cdae7af2b7b8db7eed14f8ab8] <== * F0724 22:32:56.712584 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 
helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/SecondStart]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (84.805154ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.3 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 16m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 16m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 16m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 15m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 15m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 15m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": 
permission denied Warning FailedCreatePodSandBox 15m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 10m (x17 over 14m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 5m55s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 5m41s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 5m27s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 5m12s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m57s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m41s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m28s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m15s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m1s kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 1s (x16 over 3m49s) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7acea3501c8a31976c77abeaa8cc842196ce3e11ab728e41cc37ae3e68de142e": failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 === RUN TestStartStop/group/containerd/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: (dbg) TestStartStop/group/containerd/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... 
helpers_test.go:332: "kubernetes-dashboard-6dbb54fd95-ms9wg" [267f98a1-434e-45a2-abda-53ad3bb7bea1] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) === CONT TestStartStop/group/crio/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: ***** TestStartStop/group/crio/serial/UserAppExistsAfterStop: pod "k8s-app=kubernetes-dashboard" failed to start within 9m0s: timed out waiting for the condition **** start_stop_delete_test.go:208: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 start_stop_delete_test.go:208: TestStartStop/group/crio/serial/UserAppExistsAfterStop: showing logs for failed pods as of 2020-07-24 22:42:34.404359515 +0000 UTC m=+3984.528724000 start_stop_delete_test.go:208: (dbg) Run: kubectl --context crio-20200724220901-14997 describe po kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard start_stop_delete_test.go:208: (dbg) kubectl --context crio-20200724220901-14997 describe po kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: Name: kubernetes-dashboard-6979c57f4c-wbxrt Namespace: kubernetes-dashboard Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:27:16 +0000 Labels: k8s-app=kubernetes-dashboard pod-template-hash=6979c57f4c Annotations: Status: Pending IP: IPs: Controlled By: ReplicaSet/kubernetes-dashboard-6979c57f4c Containers: kubernetes-dashboard: Container ID: Image: kubernetesui/dashboard:v2.0.1 Image ID: Port: 9090/TCP Host Port: 0/TCP Args: --namespace=kubernetes-dashboard --enable-skip-login --disable-settings-authorizer State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Liveness: http-get http://:9090/ delay=30s timeout=30s period=10s #success=1 #failure=3 Environment: Mounts: /tmp from tmp-volume (rw) /var/run/secrets/kubernetes.io/serviceaccount from kubernetes-dashboard-token-gtm9d (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: tmp-volume: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: kubernetes-dashboard-token-gtm9d: Type: Secret (a volume populated by a Secret) SecretName: kubernetes-dashboard-token-gtm9d Optional: false QoS Class: BestEffort Node-Selectors: beta.kubernetes.io/os=linux Tolerations: node-role.kubernetes.io/master:NoSchedule node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 15m default-scheduler Successfully assigned kubernetes-dashboard/kubernetes-dashboard-6979c57f4c-wbxrt to crio-20200724220901-14997 Warning FailedCreatePodSandBox 15m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(19056e11411b5476eed57c5f9ec729bcec3f6c9b58e3f7ba30d85acafdab7c3b): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 15m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(f89bf2b9d35d4f586773982953638d928daf8f160cbb6a185064b8ee7f164f4a): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 14m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(c6d47e6666fe910d10583c9eeafe35679efcb0f6527392f6c6a50dfce27ea704): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 14m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(48d849fe7038e4bb4e74b292e22c964416865fa52b3204e03ba29030aa906f82): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9c331805bd4e2980e00621ddfac6f462570e3b407c2a3ed6b77753d59f557c24): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(b6eef04dda52921788e7c5bb36538cc873f603d34725807c7c3180104fa49107): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(383ffcb825d081e95ac0f20445f755f62850caebbccc5e936b9cadace0f2d8bc): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(f7e847ccee1dc26f37d289e119ac324611ff8d3132b6a8d6f3f57e03af124f67): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 12m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(cbc5df48e6597b49b735704c1feca0c1e5f35e56ca9a3edbcbfc442f995373d3): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4s (x51 over 12m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4): 
failed to set bridge addr: could not add IP address to "cni0": permission denied start_stop_delete_test.go:208: (dbg) Run: kubectl --context crio-20200724220901-14997 logs kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard start_stop_delete_test.go:208: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 logs kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: exit status 1 (84.47207ms) ** stderr ** Error from server (BadRequest): container "kubernetes-dashboard" in pod "kubernetes-dashboard-6979c57f4c-wbxrt" is waiting to start: ContainerCreating ** /stderr ** start_stop_delete_test.go:208: kubectl --context crio-20200724220901-14997 logs kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: exit status 1 start_stop_delete_test.go:209: failed waiting for 'addon dashboard' pod post-stop-start: k8s-app=kubernetes-dashboard within 9m0s: timed out waiting for the condition helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/UserAppExistsAfterStop]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 667119, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:26:07.35920725Z", "FinishedAt": "2020-07-24T22:26:00.416732195Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", 
"UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { 
"Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32916" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32915" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32914" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32913" } ] }, "SandboxKey": "/var/run/docker/netns/5953f0ce29bf", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/UserAppExistsAfterStop FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/UserAppExistsAfterStop]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25: (1.496468292s) helpers_test.go:245: TestStartStop/group/crio/serial/UserAppExistsAfterStop logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:42:35 UTC. 
-- * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.053746851Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.057885339Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5 NetNS:/proc/34615/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.057934142Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.057945243Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.129349106Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.71 -j CNI-506d78b0bb0c4c8cb98a2a0d -m comment --comment name: \"crio-bridge\" id: \"f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-506d78b0bb0c4c8cb98a2a0d':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.129412811Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.71 -j CNI-506d78b0bb0c4c8cb98a2a0d -m comment --comment name: \"crio-bridge\" id: \"f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-506d78b0bb0c4c8cb98a2a0d':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.129505917Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.71 -j CNI-506d78b0bb0c4c8cb98a2a0d -m comment --comment name: \"crio-bridge\" id: \"f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-506d78b0bb0c4c8cb98a2a0d':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=cab3a46e-d06d-4e71-9ea2-b1292318d493 * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.818252893Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.818325998Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.818497910Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 
22:42:29.822420183Z" level=info msg="Got pod network &{Name:kubernetes-dashboard-6979c57f4c-wbxrt Namespace:kubernetes-dashboard ID:d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4 NetNS:/proc/34701/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.822470586Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.822482487Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.880758638Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.72 -j CNI-fcb87aa79471c01a403c2db7 -m comment --comment name: \"crio-bridge\" id: \"d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-fcb87aa79471c01a403c2db7':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.880801541Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.72 -j CNI-fcb87aa79471c01a403c2db7 -m comment --comment name: \"crio-bridge\" id: \"d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-fcb87aa79471c01a403c2db7':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:42:29 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:29.880865945Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.72 -j CNI-fcb87aa79471c01a403c2db7 -m comment --comment name: \"crio-bridge\" id: \"d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-fcb87aa79471c01a403c2db7':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=f0ee9b75-8d17-4441-9614-c135c6af0860 * Jul 24 22:42:30 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:30.983868417Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:30 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:30.983923721Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:30 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:30.984117034Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:42:30 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:30.989463406Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d NetNS:/proc/34758/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: 
PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:42:30 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:30.989521710Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:42:30 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:30.989533611Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:42:31 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:31.077682738Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.73 -j CNI-322f824e7ad28df3c38e15f5 -m comment --comment name: \"crio-bridge\" id: \"bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-322f824e7ad28df3c38e15f5':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:42:31 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:31.077725541Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.73 -j CNI-322f824e7ad28df3c38e15f5 -m comment --comment name: \"crio-bridge\" id: \"bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-322f824e7ad28df3c38e15f5':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:42:31 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:42:31.077780045Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.73 -j CNI-322f824e7ad28df3c38e15f5 -m comment --comment name: \"crio-bridge\" id: \"bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-322f824e7ad28df3c38e15f5':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=d0f5be32-a230-4c2f-8e24-cb644c78e19d
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* e867fda1958a4 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 About a minute ago Exited kindnet-cni 7 22d8d62f2f19c
* 5bac3a6cbfb36 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 4 minutes ago Exited storage-provisioner 6 ed36006be83da
* 4e28cb6807125 ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 15 minutes ago Running kube-proxy 0 8b8f6c79bb715
* 5e2ae26239902 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 15 minutes ago Running kube-controller-manager 0 f5860ba3909ed
* 9881d2c304cdc 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 15 minutes ago Running kube-scheduler 0 4b2a96d2f6659
* 7cd3d2c27ce8d 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 15 minutes ago Running etcd 0 4187a344080e1
* 60aac29a1caea c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 15 minutes ago Running kube-apiserver 0 79ca654167789
*
* ==> describe nodes <==
* Name: crio-20200724220901-14997
* Roles: master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=crio-20200724220901-14997
* kubernetes.io/os=linux
* minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
* minikube.k8s.io/name=crio-20200724220901-14997
* minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700
* minikube.k8s.io/version=v1.12.1
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
* node.alpha.kubernetes.io/ttl: 0
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000
* Taints:
* Unschedulable: false
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 24 Jul 2020 22:42:04 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 24 Jul 2020 22:42:04 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 24 Jul 2020 22:42:04 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 24 Jul 2020 22:42:04 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 172.17.0.2
* Hostname: crio-20200724220901-14997
* Capacity:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* Allocatable:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* System Info:
* Machine ID: e7c76c839aa944e99c5c76ea1345e361
* System UUID: 8677386b-5379-4ccc-90e7-5b585098762e
* Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529
* Kernel Version: 5.4.0-1022-azure
* OS Image: Ubuntu 19.10
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: cri-o://1.17.3
* Kubelet Version: v1.15.7
* Kube-Proxy Version: v1.15.7
* PodCIDR: 10.244.0.0/24
* Non-terminated Pods: (11 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m
* kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 31m
* kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31m
* kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 31m
* kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 31m
* kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 15m
* kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31m
* kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 30m
* kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31m
* kubernetes-dashboard dashboard-metrics-scraper-c8b69c96c-ljbr4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15m
* kubernetes-dashboard kubernetes-dashboard-6979c57f4c-wbxrt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 15m
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 750m (4%) 100m (0%)
* memory 120Mi (0%) 220Mi (0%)
* ephemeral-storage 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasSufficientMemory 32m (x7 over 32m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 32m (x7 over 32m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 32m (x7 over 32m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Warning readOnlySysFS 31m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 31m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
* Normal Starting 15m kubelet, crio-20200724220901-14997 Starting kubelet.
* Warning SystemOOM 15m (x2 over 15m) kubelet, crio-20200724220901-14997 System OOM encountered
* Normal NodeHasSufficientMemory 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 15m (x7 over 15m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 15m kubelet, crio-20200724220901-14997 Updated Node Allocatable limit across pods
* Warning readOnlySysFS 15m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 15m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
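[editor's note] Every failing component in this dump repeats a single signature: the bridge CNI plugin cannot assign an address to cni0 (failed to set bridge addr: could not add IP address to "cni0": permission denied), so CRI-O never finishes CreatePodSandbox even though the node reports Ready above. When triaging an archived post-mortem like this, counting that signature in the same output the harness collects can confirm the failure is uniform rather than intermittent. A minimal Go sketch, assuming this run's binary path and profile name; the scan is illustrative and not part of helpers_test.go:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Collect the same post-mortem output the harness gathers above:
	// ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25
	out, err := exec.Command("./minikube-linux-amd64",
		"-p", "crio-20200724220901-14997", "logs", "-n", "25").CombinedOutput()
	if err != nil {
		fmt.Println("minikube logs:", err) // partial output may still be usable
	}
	// The failure signature seen in the CRI-O, kubelet, and event output.
	sig := `could not add IP address to "cni0": permission denied`
	count := 0
	sc := bufio.NewScanner(bytes.NewReader(out))
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) // journal lines can exceed the 64 KiB default
	for sc.Scan() {
		if strings.Contains(sc.Text(), sig) {
			count++
		}
	}
	fmt.Printf("cni0 permission-denied lines: %d\n", count)
}

Running this against the bundled logs rather than the live journal keeps the check reproducible from the CI artifact alone.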
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <== * 2020-07-24 22:28:20.693501 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693514 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute * 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute * 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute * 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute * 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute * 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request 
"key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute * 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute * 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute * 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute * 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute * 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute * 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute * 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute * 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s * 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.514657725s) to execute * 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute * 2020-07-24 22:36:56.295220 I | mvcc: store.index: compact 1090 * 2020-07-24 22:36:56.296281 I | mvcc: finished scheduled compaction at 1090 (took 726.75µs) * 2020-07-24 22:41:56.312125 I | mvcc: store.index: compact 1203 * 2020-07-24 22:41:56.312972 I | mvcc: finished scheduled compaction at 1203 (took 523.537µs) * * ==> kernel <== * 22:42:36 up 1:09, 0 users, load average: 0.73, 1.46, 4.30 * Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <== * I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: 
[{127.0.0.1:2379 }] * I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:16.144655 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.144698 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:36.144939 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.145007 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.155063 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * * ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <== * I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager * I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller * W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp. * I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal. * I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller * I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller * I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller * I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller * I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server. 
* I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller * I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller * I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller * I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller * I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller * I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller * I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1 * I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1 * I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4 * I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt * I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller * I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller * * ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <== * I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier. 
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7 * I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:10:51.749315 1 config.go:187] Starting service config controller * I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller * I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller * I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller * W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy * I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier. * I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7 * I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller * I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:27:02.438814 1 config.go:187] Starting service config controller * I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller * I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller * * ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <== * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource 
"nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory * W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. * W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. * W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. * I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7 * I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled * W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled * I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found] * E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list 
*v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found] * E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found] * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:42:36 UTC. 
-- * Jul 24 22:42:13 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:13.701715 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(5c42ccf4db931a2987323de9a0c72d6744b0c85acb9368f16621c4ad1e872fdb): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:15 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:15.744498 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:42:16 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:16.152170 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(2a27a50bea94b3189ee7854aa20f112a4fd5eb3dbfd7e9024d80ecec6db5bf41): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:16 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:16.152233 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(2a27a50bea94b3189ee7854aa20f112a4fd5eb3dbfd7e9024d80ecec6db5bf41): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:16 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:16.152256 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(2a27a50bea94b3189ee7854aa20f112a4fd5eb3dbfd7e9024d80ecec6db5bf41): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:16 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:16.152312 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(2a27a50bea94b3189ee7854aa20f112a4fd5eb3dbfd7e9024d80ecec6db5bf41): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:22.744177 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:42:26 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:26.734545 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(b923803e02cce027d052f4d36cc6135f702e4cbe7f7d9b3b39ffb386f4b5344a): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:26 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:26.734617 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(b923803e02cce027d052f4d36cc6135f702e4cbe7f7d9b3b39ffb386f4b5344a): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:26 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:26.734645 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(b923803e02cce027d052f4d36cc6135f702e4cbe7f7d9b3b39ffb386f4b5344a): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:26 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:26.734701 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(b923803e02cce027d052f4d36cc6135f702e4cbe7f7d9b3b39ffb386f4b5344a): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:26 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:26.744158 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:42:29 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:29.347557 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:29 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:29.347618 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:29 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:29.347642 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:29 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:29.347698 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(f6da9f9fc99396f55e4d8c6b8917efd1bde134b58a482ef7e4c74be3b6499bf5): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:30 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:30.084922 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:30 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:30.084986 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:30 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:30.085015 1598 kuberuntime_manager.go:692] createPodSandbox for pod 
"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:30 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:30.085072 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(d8a67ca96aaeeeed6d35210a4cc80cf65923d3c328a6546642e7bf20fa79bea4): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:42:31 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:31.272083 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:31 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:31.272144 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:31 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:31.272168 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:42:31 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:31.272223 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(bcfee59a70d80fbeb8c1e28ce5c2246c0a5a401849448af3bd6e8ae9f5fbc70d): failed to set bridge 
addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:42:35 crio-20200724220901-14997 kubelet[1598]: E0724 22:42:35.744478 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
*
* ==> storage-provisioner [5bac3a6cbfb36f99442c9e6f71391fd2e2cc3e628e4245b426900327f0bff2e1] <==
* F0724 22:38:14.120655 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt
helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/UserAppExistsAfterStop]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (85.396835ms)
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Node: crio-20200724220901-14997/172.17.0.2
Start Time: Fri, 24 Jul 2020 22:17:30 +0000
Labels: integration-test=busybox
Annotations:
Status: Pending
IP:
IPs:
Containers:
busybox:
Container ID:
Image: busybox:1.28.4-glibc
Image ID:
Port:
Host Port:
Command: sleep 3600
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
default-token-2jsfl:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-2jsfl
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 25m default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997
Warning FailedCreatePodSandBox 25m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 18m (x17 over 22m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 15m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message
Warning FailedCreatePodSandBox 15m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 15m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 14m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 14m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 13m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 25s (x50 over 12m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4595ee71425be17730f35d46d7129749144dbf603a922f38ffc4b9fcea67675a): failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found
Error from server (NotFound): pods "kubernetes-dashboard-6979c57f4c-wbxrt" not found
** /stderr **
helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 === RUN TestStartStop/group/crio/serial/AddonExistsAfterStop start_stop_delete_test.go:219: (dbg) TestStartStop/group/crio/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... helpers_test.go:332: "kubernetes-dashboard-6979c57f4c-wbxrt" [2734d7c0-daef-420d-966b-49f33f94e547] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) === CONT TestStartStop/group/containerd/serial/UserAppExistsAfterStop start_stop_delete_test.go:208: ***** TestStartStop/group/containerd/serial/UserAppExistsAfterStop: pod "k8s-app=kubernetes-dashboard" failed to start within 9m0s: timed out waiting for the condition **** start_stop_delete_test.go:208: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 start_stop_delete_test.go:208: TestStartStop/group/containerd/serial/UserAppExistsAfterStop: showing logs for failed pods as of 2020-07-24 22:44:56.4601655 +0000 UTC m=+4126.584529985 start_stop_delete_test.go:208: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe po kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard start_stop_delete_test.go:208: (dbg) kubectl --context containerd-20200724221200-14997 describe po kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: Name: kubernetes-dashboard-6dbb54fd95-ms9wg Namespace: kubernetes-dashboard Priority: 0 Node: containerd-20200724221200-14997/172.17.0.3 Start Time: Fri, 24 Jul 2020 22:30:25 +0000 Labels: k8s-app=kubernetes-dashboard pod-template-hash=6dbb54fd95 Annotations: <none> Status: Pending IP: IPs: <none> Controlled By: ReplicaSet/kubernetes-dashboard-6dbb54fd95 Containers: kubernetes-dashboard: Container ID: Image: kubernetesui/dashboard:v2.0.1 Image ID: Port: 9090/TCP Host Port: 0/TCP Args: --namespace=kubernetes-dashboard --enable-skip-login --disable-settings-authorizer State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Liveness: http-get http://:9090/ delay=30s timeout=30s period=10s #success=1 #failure=3 Environment: <none> Mounts: /tmp from tmp-volume (rw) /var/run/secrets/kubernetes.io/serviceaccount from kubernetes-dashboard-token-cf4fw (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: tmp-volume: Type: EmptyDir (a temporary directory that shares a pod's lifetime) Medium: SizeLimit: <unset> kubernetes-dashboard-token-cf4fw: Type: Secret (a volume populated by a Secret) SecretName: kubernetes-dashboard-token-cf4fw Optional: false QoS Class: BestEffort Node-Selectors: beta.kubernetes.io/os=linux Tolerations: node-role.kubernetes.io/master:NoSchedule node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 14m default-scheduler Successfully assigned kubernetes-dashboard/kubernetes-dashboard-6dbb54fd95-ms9wg to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"e79b213c08e44a7f73ace84b89b579911cdf4a0bfbed59c2c0937c55db5b6e10": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "3142fa68792437a4704afa7f3535dc4020c06b8bb4917390ec26c7cd16ae1f90": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "af7488a89a2417d800b149e2711da2af032b2ac5581433d2933809cff09c9d0d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "271c73946c0bbf4e0b379282a3ef3af1fe6359ff48b0d027c394274866d9cc43": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f33ea4797364292b59659b6d5f86fdf3ef283ae31cc01f3af3a5a1b4bf3e0669": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a66d211d3332a523c413a9ba32cc87aef19df631192696bf3878b8033626f25f": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 12m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b499cb816b30c5ca67cb740b1627dba76ad9c1cf04d904f3430a8dd6366a3f": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 12m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e66d94c319ffca2ce3e8ad9ffa38714d77a9e1946c880ee16c86556768dcf4dd": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 12m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "57dc2e4963506ad3cdb53073ee7ec44035cbce5c06f86bb84ba725e1bea1a0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m24s (x32 over 12m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "806ee15894db6461ab2df296b1d64f97b3a7b905774364d71e3b7abf85102b39": failed to set bridge addr: could not add IP address to "cni0": permission denied start_stop_delete_test.go:208: (dbg) Run: kubectl --context containerd-20200724221200-14997 logs kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard start_stop_delete_test.go:208: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 logs kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: exit status 1 (82.73585ms) ** stderr ** Error from server (BadRequest): container "kubernetes-dashboard" in pod 
"kubernetes-dashboard-6dbb54fd95-ms9wg" is waiting to start: ContainerCreating ** /stderr ** start_stop_delete_test.go:208: kubectl --context containerd-20200724221200-14997 logs kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: exit status 1 start_stop_delete_test.go:209: failed waiting for 'addon dashboard' pod post-stop-start: k8s-app=kubernetes-dashboard within 9m0s: timed out waiting for the condition helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/UserAppExistsAfterStop]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], 
"DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": 
true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/UserAppExistsAfterStop FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/UserAppExistsAfterStop]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:240: (dbg) Done: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25: (1.640801833s) helpers_test.go:245: TestStartStop/group/containerd/serial/UserAppExistsAfterStop logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 8df5e72f47d58 4689081edb103 About a minute ago Exited storage-provisioner 7 0418a87b40342 * 7f8a5cfae4a15 2186a1a396deb 4 minutes ago Exited kindnet-cni 6 c2246648589e4 * 8662707b3b0f7 da26705ccb4b5 14 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 14 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 15 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 15 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 16 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 16 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:44:57 UTC. 
-- * Jul 24 22:44:10 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:10.040080887Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"60cc38d51ea91764389c1e1134a53b4755acd0bfbd6616210e7e9539535967b4\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:17 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:17.339014649Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:44:17 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:17.339014749Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:44:18 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:18.338794759Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:44:19 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:19.173960024Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"716d2e3121e2bd14512552243546c268d09e1733539ae54fe553ce1a94265947\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:19 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:19.292644076Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"7786b5a1ee9473e66a4ae32b0ff968ab5884c08ee68482800ced0e70fe0a1adf\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:20 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:20.465013385Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"4896313a114a8ce46c7d3d78ab38ecc434cd9509dd82b90784c1975184b2f12a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:23 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:23.338847996Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:44:25 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:25.186760798Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"4e179feb59a2c2619eb741ae79c753fd8e93ad2d552f117f1cdd775a1559c6fa\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:30 containerd-20200724221200-14997 
containerd[458]: time="2020-07-24T22:44:30.338695939Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:44:31 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:31.730212556Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"71f7ac075111418e0698c82c03adc34db68c1ba8d634265b2af707b06d3d0531\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:32 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:32.338854384Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:44:33 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:33.807493721Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"90bfc9c9a19e6bf342beb19357e0d978322560260efb524ace51967f0f27eaea\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:35.338946226Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:44:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:36.570090946Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c91419b7dd560c43c5e1350cc511c157bf0da445f8c1fd7ade4140ed3e02cbe6\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:37.338718800Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:44:39 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:39.224439043Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"af047999608418eced1e576b296aae33e12cefc5b647cc9e1df8c1de047c020a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:46 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:46.338806898Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:44:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:47.339136521Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:44:48 
containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:48.338743990Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:44:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:48.381888588Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"53dd4d77250a1f44c01d493312120102932c3c3c321d4ec549a46f84f42f7854\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:49 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:49.274431016Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"084b31b0604813de03cbd4d9c00a8824b50df163900f2b793d6bd0b5d95287c5\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:50 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:50.347427386Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"2aa1443407e76ce1cde6ebf977a49fefbff3223b2fa55118788838ce65eb5380\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:44:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:52.338980591Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:44:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:44:54.232434679Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"cc7e75bcaa45e4e114f87ae39e936c9bef727b3949b1a1296a3d7483445f85bb\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: <none> * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: <unset> * RenewTime: Fri, 24 Jul 2020 22:44:53 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 
22:44:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:44:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:44:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:44:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.3 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 5ea7312d3bbd4189a79e31122cb237a6 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25m * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 31m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 31m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 13m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 32m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 31m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31m * kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 32m (x6 over 32m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 32m (x5 over 32m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 32m (x6 over 32m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal Starting 31m kubelet, containerd-20200724221200-14997 Starting kubelet. 
* Warning SystemOOM 31m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 31m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 31m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 31m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 31m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 31m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 31m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Normal Starting 31m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * Warning readOnlySysFS 31m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 16m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 16m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 16m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeAllocatableEnforced 16m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeHasSufficientMemory 16m (x7 over 16m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 16m (x7 over 16m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 16m (x7 over 16m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 14m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 14m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
* * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. * [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <== * raft2020/07/24 22:28:24 INFO: raft.node: 952f31ff200093ba elected leader 952f31ff200093ba at term 3 * 2020-07-24 22:28:24.327120 I | etcdserver: published {Name:containerd-20200724221200-14997 ClientURLs:[https://172.17.0.3:2379]} to cluster 5af0857ece1ce0e5 * 2020-07-24 22:28:24.327146 I | embed: ready to serve client requests * 2020-07-24 22:28:24.327181 I | embed: ready to serve client requests * 2020-07-24 22:28:24.328918 I | embed: serving client requests on 127.0.0.1:2379 * 2020-07-24 22:28:24.328956 I | embed: serving client requests on 172.17.0.3:2379 * 2020-07-24 22:29:30.036095 W | etcdserver: read-only range request "key:\"/registry/ranges/serviceips\" " with result "range_response_count:1 size:118" took too long (100.036069ms) to execute * 2020-07-24 22:29:30.036950 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (100.736017ms) to execute * 2020-07-24 22:29:30.036995 W | etcdserver: read-only range request "key:\"/registry/csinodes/containerd-20200724221200-14997\" " with result "range_response_count:1 size:521" took too long (100.33589ms) to execute * 2020-07-24 22:29:30.037878 W | etcdserver: read-only range request "key:\"/registry/ranges/servicenodeports\" " with result "range_response_count:1 size:120" took too long (101.214251ms) to execute * 2020-07-24 22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute * 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " 
with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute * 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute * 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s * 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute * 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute * 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute * 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute * 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s * 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute * 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute * 2020-07-24 22:39:26.228142 I | mvcc: store.index: compact 1128 * 2020-07-24 22:39:26.229399 I | mvcc: finished scheduled compaction at 1128 (took 884.061µs) * 2020-07-24 22:44:26.238245 I | mvcc: store.index: compact 1236 * 2020-07-24 22:44:26.238942 I | mvcc: finished scheduled compaction at 1236 (took 365.326µs) * * ==> kernel <== * 22:44:58 up 1:12, 0 users, load average: 0.50, 1.10, 3.77 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <== * I0724 22:29:29.935607 1 cache.go:39] Caches are synced for autoregister controller * I0724 22:29:29.935618 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller * I0724 22:29:29.935640 1 shared_informer.go:230] Caches are synced for cluster_authentication_trust_controller * I0724 22:29:29.936515 1 shared_informer.go:230] Caches are synced for crd-autoregister * I0724 22:29:30.833239 1 controller.go:130] OpenAPI AggregationController: action for item : Nothing (removed from the queue). * I0724 22:29:30.833268 1 controller.go:130] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue). * I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist. 
* W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3] * I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints * I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io * I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io * I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts * I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io * I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io * I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s): * Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed * I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s): * Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database * I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s): * Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database * I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s): * Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response * * ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <== * I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd * I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg * W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist * I0724 22:30:25.167526 1 
shared_informer.go:230] Caches are synced for disruption * I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server. * I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL * I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node * I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator * I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator * I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator * I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC * I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint * I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513029 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * * ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <== * I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * * ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <== * I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3 * I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3
* I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:30:00.460979 1 config.go:315] Starting service config controller
* I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config
* I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller
* I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config
* I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config
* I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config
*
* ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <==
* E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:55.798147 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:44:58 UTC. --
* Jul 24 22:44:39 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:39.224789 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "af047999608418eced1e576b296aae33e12cefc5b647cc9e1df8c1de047c020a": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:39 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:39.224807 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "af047999608418eced1e576b296aae33e12cefc5b647cc9e1df8c1de047c020a": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:39 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:39.224863 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"af047999608418eced1e576b296aae33e12cefc5b647cc9e1df8c1de047c020a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:44:39 containerd-20200724221200-14997 kubelet[544]: I0724 22:44:39.338584 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 8df5e72f47d580e713b2f26adb48a23fa48275d955c774f643423ff47742dfe7
* Jul 24 22:44:39 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:39.338819 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"
* Jul 24 22:44:47 containerd-20200724221200-14997 kubelet[544]: I0724 22:44:47.338871 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 7f8a5cfae4a150b1f8fc046c723b79baaf43a8bcfaf7ec564c536af11b82390d
* Jul 24 22:44:47 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:47.340141 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"
* Jul 24 22:44:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:48.382128 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "53dd4d77250a1f44c01d493312120102932c3c3c321d4ec549a46f84f42f7854": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:48.382182 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "53dd4d77250a1f44c01d493312120102932c3c3c321d4ec549a46f84f42f7854": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:48.382197 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "53dd4d77250a1f44c01d493312120102932c3c3c321d4ec549a46f84f42f7854": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:48.382246 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"53dd4d77250a1f44c01d493312120102932c3c3c321d4ec549a46f84f42f7854\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:44:49 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:49.274693 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "084b31b0604813de03cbd4d9c00a8824b50df163900f2b793d6bd0b5d95287c5": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:49 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:49.274754 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "084b31b0604813de03cbd4d9c00a8824b50df163900f2b793d6bd0b5d95287c5": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:49 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:49.274770 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "084b31b0604813de03cbd4d9c00a8824b50df163900f2b793d6bd0b5d95287c5": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:49 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:49.274819 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"084b31b0604813de03cbd4d9c00a8824b50df163900f2b793d6bd0b5d95287c5\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:44:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:50.347774 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2aa1443407e76ce1cde6ebf977a49fefbff3223b2fa55118788838ce65eb5380": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:50.347857 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2aa1443407e76ce1cde6ebf977a49fefbff3223b2fa55118788838ce65eb5380": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:50.347884 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "2aa1443407e76ce1cde6ebf977a49fefbff3223b2fa55118788838ce65eb5380": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:50.347984 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"2aa1443407e76ce1cde6ebf977a49fefbff3223b2fa55118788838ce65eb5380\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:44:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:54.232720 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "cc7e75bcaa45e4e114f87ae39e936c9bef727b3949b1a1296a3d7483445f85bb": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:54.232773 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "cc7e75bcaa45e4e114f87ae39e936c9bef727b3949b1a1296a3d7483445f85bb": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:54.232789 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "cc7e75bcaa45e4e114f87ae39e936c9bef727b3949b1a1296a3d7483445f85bb": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:44:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:54.232841 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"cc7e75bcaa45e4e114f87ae39e936c9bef727b3949b1a1296a3d7483445f85bb\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:44:54 containerd-20200724221200-14997 kubelet[544]: I0724 22:44:54.338261 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 8df5e72f47d580e713b2f26adb48a23fa48275d955c774f643423ff47742dfe7
* Jul 24 22:44:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:44:54.338603 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"
*
* ==> storage-provisioner [8df5e72f47d580e713b2f26adb48a23fa48275d955c774f643423ff47742dfe7] <==
* F0724 22:24:44.328716 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
* F0724 22:43:14.728572 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997
helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg
helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/UserAppExistsAfterStop]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (83.667815ms)
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Node: containerd-20200724221200-14997/172.17.0.3
Start Time: Fri, 24 Jul 2020 22:19:40 +0000
Labels: integration-test=busybox
Annotations:
Status: Pending
IP:
IPs:
Containers:
  busybox:
    Container ID:
    Image: busybox:1.28.4-glibc
    Image ID:
    Port:
    Host Port:
    Command:
      sleep
      3600
    State: Waiting
      Reason: ContainerCreating
    Ready: False
    Restart Count: 0
    Environment:
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro)
Conditions:
  Type Status
  Initialized True
  Ready False
  ContainersReady False
  PodScheduled True
Volumes:
  default-token-xmm9f:
    Type: Secret (a volume populated by a Secret)
    SecretName: default-token-xmm9f
    Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
             node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type Reason Age From Message
  ---- ------ ---- ---- -------
  Normal Scheduled 25m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997
  Warning FailedCreatePodSandBox 25m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 25m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 19m (x17 over 23m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 14m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 13m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning FailedCreatePodSandBox 4m46s (x34 over 12m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "d37e97eddf937920ba36c635423ef8d35ff5063f652212650e104905b00b5dc5": failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found
Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found
** /stderr **
helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1
=== RUN TestStartStop/group/containerd/serial/AddonExistsAfterStop
start_stop_delete_test.go:219: (dbg) TestStartStop/group/containerd/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:332: "kubernetes-dashboard-6dbb54fd95-ms9wg" [267f98a1-434e-45a2-abda-53ad3bb7bea1] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
=== CONT TestStartStop/group/crio/serial/AddonExistsAfterStop
start_stop_delete_test.go:219: ***** TestStartStop/group/crio/serial/AddonExistsAfterStop: pod "k8s-app=kubernetes-dashboard" failed to start within 9m0s: timed out waiting for the condition ****
start_stop_delete_test.go:219: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
start_stop_delete_test.go:219: TestStartStop/group/crio/serial/AddonExistsAfterStop: showing logs for failed pods as of 2020-07-24 22:51:37.339282809 +0000 UTC m=+4527.463647294
start_stop_delete_test.go:219: (dbg) Run: kubectl --context crio-20200724220901-14997 describe po kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard
start_stop_delete_test.go:219: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe po kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: context deadline exceeded (1.6µs)
start_stop_delete_test.go:219: kubectl --context crio-20200724220901-14997 describe po kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: context deadline exceeded
start_stop_delete_test.go:219: (dbg) Run: kubectl --context crio-20200724220901-14997 logs kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard
start_stop_delete_test.go:219: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 logs kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: context deadline exceeded (500ns)
start_stop_delete_test.go:219: kubectl --context crio-20200724220901-14997 logs kubernetes-dashboard-6979c57f4c-wbxrt -n kubernetes-dashboard: context deadline exceeded
start_stop_delete_test.go:220: failed waiting for 'addon dashboard' pod post-stop-start: k8s-app=kubernetes-dashboard within 9m0s: timed out waiting for the condition
helpers_test.go:215: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/AddonExistsAfterStop]: docker inspect <======
helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997
helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997:
-- stdout --
[
    {
        "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a",
        "Created": "2020-07-24T22:09:11.178770681Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [
            "/sbin/init"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 667119,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2020-07-24T22:26:07.35920725Z",
            "FinishedAt": "2020-07-24T22:26:00.416732195Z"
        },
        "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578",
        "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname",
        "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts",
        "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log",
        "Name": "/crio-20200724220901-14997",
        "RestartCount": 0,
        "Driver": "overlay2",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "unconfined",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": [
                "/lib/modules:/lib/modules:ro",
                "crio-20200724220901-14997:/var"
            ],
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {}
            },
            "NetworkMode": "default",
            "PortBindings": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ]
            },
            "RestartPolicy": {
                "Name": "no",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "CapAdd": null,
            "CapDrop": null,
            "Capabilities": null,
            "Dns": [],
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "private",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "seccomp=unconfined",
                "apparmor=unconfined",
                "label=disable"
            ],
            "Tmpfs": {
                "/run": "",
                "/tmp": ""
            },
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "ConsoleSize": [
                0,
                0
            ],
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 2306867200,
            "NanoCpus": 2000000000,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": null,
            "BlkioDeviceWriteBps": null,
            "BlkioDeviceReadIOps": null,
            "BlkioDeviceWriteIOps": null,
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "KernelMemory": 0,
            "KernelMemoryTCP": 0,
            "MemoryReservation": 0,
            "MemorySwap": -1,
            "MemorySwappiness": null,
            "OomKillDisable": false,
            "PidsLimit": null,
            "Ulimits": null,
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": {
                "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff",
                "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged",
                "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff",
                "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work"
            },
            "Name": "overlay2"
        },
        "Mounts": [
            {
                "Type": "bind",
                "Source": "/lib/modules",
                "Destination": "/lib/modules",
                "Mode": "ro",
                "RW": false,
                "Propagation": "rprivate"
            },
            {
                "Type": "volume",
                "Name": "crio-20200724220901-14997",
                "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data",
                "Destination": "/var",
                "Driver": "local",
                "Mode": "z",
                "RW": true,
                "Propagation": ""
            }
        ],
        "Config": {
            "Hostname": "crio-20200724220901-14997",
            "Domainname": "",
            "User": "root",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "2376/tcp": {},
                "5000/tcp": {},
                "8443/tcp": {}
            },
            "Tty": true,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "container=docker",
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": null,
            "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438",
            "Volumes": null,
            "WorkingDir": "",
            "Entrypoint": [
                "/usr/local/bin/entrypoint",
                "/sbin/init"
            ],
            "OnBuild": null,
            "Labels": {
                "created_by.minikube.sigs.k8s.io": "true",
                "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997",
                "name.minikube.sigs.k8s.io": "crio-20200724220901-14997",
                "role.minikube.sigs.k8s.io": ""
            },
            "StopSignal": "SIGRTMIN+3"
        },
        "NetworkSettings": {
            "Bridge": "",
            "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325",
            "HairpinMode": false,
            "LinkLocalIPv6Address": "",
            "LinkLocalIPv6PrefixLen": 0,
            "Ports": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32916"
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32915"
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32914"
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "32913"
                    }
                ]
            },
            "SandboxKey": "/var/run/docker/netns/5953f0ce29bf",
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5",
            "Gateway": "172.17.0.1",
            "GlobalIPv6Address": "",
            "GlobalIPv6PrefixLen": 0,
            "IPAddress": "172.17.0.2",
            "IPPrefixLen": 16,
            "IPv6Gateway": "",
            "MacAddress": "02:42:ac:11:00:02",
            "Networks": {
                "bridge": {
                    "IPAMConfig": null,
                    "Links": null,
                    "Aliases": null,
                    "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d",
                    "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5",
                    "Gateway": "172.17.0.1",
                    "IPAddress": "172.17.0.2",
                    "IPPrefixLen": 16,
                    "IPv6Gateway": "",
                    "GlobalIPv6Address": "",
                    "GlobalIPv6PrefixLen": 0,
                    "MacAddress": "02:42:ac:11:00:02",
                    "DriverOpts": null
                }
            }
        }
    }
]
-- /stdout --
helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
helpers_test.go:237: <<< TestStartStop/group/crio/serial/AddonExistsAfterStop FAILED: start of post-mortem logs <<<
helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/AddonExistsAfterStop]: minikube logs <======
helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25
helpers_test.go:245: TestStartStop/group/crio/serial/AddonExistsAfterStop logs:
-- stdout --
* ==> CRI-O <==
* -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:38 UTC. --
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.114414755Z" level=info msg="About to add CNI network crio-bridge (type=bridge)"
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.748254410Z" level=info msg="Attempting to create container: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790302613Z" level=warning msg="requested logPath for ctr id af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9 is a relative path: kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790337215Z" level=warning msg="logPath from relative path is now absolute: /var/log/pods/kube-system_kindnet-4qfcd_9fb35a28-5601-47e4-88e6-be2a18fa55ef/kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949567007Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949602509Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949730918Z" level=info msg="About to del CNI network lo (type=loopback)"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036625217Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc NetNS:/proc/52890/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036699122Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036714023Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.067026215Z" level=info msg="Created container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.075803521Z" level=info msg="Started container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=80c4438a-9514-4533-a110-67970c02411d
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108584484Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108632487Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108687891Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=f28b0fbe-43a9-42fe-a945-e8bf9a55d2a7
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.510470527Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=78468eb4-799e-459a-86d1-ccda369af23d
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993484870Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993530573Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993660882Z" level=info msg="About to del CNI network lo (type=loopback)"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999273169Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb NetNS:/proc/52960/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999325273Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999337074Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064872198Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064918701Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064975305Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=7717042a-5394-4708-808f-4e6bbaa1ba7d
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* af520b21cf962 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 1 second ago Running kindnet-cni 9 22d8d62f2f19c
* c5fc8cb46441c 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 3 minutes ago Exited storage-provisioner 8 ed36006be83da
* 9b721f7ac557c 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 5 minutes ago Exited kindnet-cni 8 22d8d62f2f19c
* 4e28cb6807125 ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 24 minutes ago Running kube-proxy 0 8b8f6c79bb715
* 5e2ae26239902 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 24 minutes ago Running kube-controller-manager 0 f5860ba3909ed
* 9881d2c304cdc 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 24 minutes ago Running kube-scheduler 0 4b2a96d2f6659
* 7cd3d2c27ce8d 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 24 minutes ago Running etcd 0 4187a344080e1
* 60aac29a1caea c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 24 minutes ago Running kube-apiserver 0 79ca654167789
*
* ==> describe nodes <==
* Name: crio-20200724220901-14997
* Roles: master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=crio-20200724220901-14997
* kubernetes.io/os=linux
* minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
* minikube.k8s.io/name=crio-20200724220901-14997
* minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700
* minikube.k8s.io/version=v1.12.1
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
* node.alpha.kubernetes.io/ttl: 0
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000
* Taints:
* Unschedulable: false
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 172.17.0.2
* Hostname: crio-20200724220901-14997
* Capacity:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* Allocatable:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* System Info:
* Machine ID: e7c76c839aa944e99c5c76ea1345e361
* System UUID: 8677386b-5379-4ccc-90e7-5b585098762e
* Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529
* Kernel Version: 5.4.0-1022-azure
* OS Image: Ubuntu 19.10
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: cri-o://1.17.3
* Kubelet Version: v1.15.7
* Kube-Proxy Version: v1.15.7
* PodCIDR: 10.244.0.0/24
* Non-terminated Pods: (11 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m
* kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m
* kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m
* kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 24m
* kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 39m
* kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kubernetes-dashboard dashboard-metrics-scraper-c8b69c96c-ljbr4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m
* kubernetes-dashboard kubernetes-dashboard-6979c57f4c-wbxrt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 750m (4%) 100m (0%)
* memory 120Mi (0%) 220Mi (0%)
* ephemeral-storage 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasSufficientMemory 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Warning readOnlySysFS 40m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 40m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
* Normal Starting 24m kubelet, crio-20200724220901-14997 Starting kubelet.
* Warning SystemOOM 24m (x2 over 24m) kubelet, crio-20200724220901-14997 System OOM encountered
* Normal NodeHasSufficientMemory 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 24m kubelet, crio-20200724220901-14997 Updated Node Allocatable limit across pods
* Warning readOnlySysFS 24m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 24m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
*
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <==
* 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute
* 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute
* 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute
* 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute
* 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute
* 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute
* 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute
* 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute
* 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute
* 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute
* 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute
* 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute
* 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute
* 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s
* 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.514657725s) to execute
* 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute
* 2020-07-24 22:36:56.295220 I | mvcc: store.index: compact 1090
* 2020-07-24 22:36:56.296281 I | mvcc: finished scheduled compaction at 1090 (took 726.75µs)
* 2020-07-24 22:41:56.312125 I | mvcc: store.index: compact 1203
* 2020-07-24 22:41:56.312972 I | mvcc: finished scheduled compaction at 1203 (took 523.537µs)
* 2020-07-24 22:46:56.323897 I | mvcc: store.index: compact 1285
* 2020-07-24 22:46:56.324603 I | mvcc: finished scheduled compaction at 1285 (took 389.624µs)
*
* ==> kernel <==
* 22:51:38 up 1:18, 0 users, load average: 0.83, 0.94, 2.73
* Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <==
* I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:33:16.144655 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.144698 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:33:36.144939 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.145007 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.155063 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
*
* ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <==
* I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager
* I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller
* W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp.
* I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal.
* I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller
* I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller
* I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller
* I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller
* I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server.
* I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller
* I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller
* I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller
* I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller
* I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller
* I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller
* I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1
* I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1
* I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4
* I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt
* I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller
* I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
* I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller
* I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller
*
* ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <==
* I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier.
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7
* I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:10:51.749315 1 config.go:187] Starting service config controller
* I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller
* I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller
* I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
* I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller
* I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller
* W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy
* I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier.
* I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7 * I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller * I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:27:02.438814 1 config.go:187] Starting service config controller * I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller * I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller * * ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <== * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory * W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. 
* W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. * W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. * I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7 * I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled * W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled * I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found] * E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, 
clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found] * E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found] * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:38 UTC. -- * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824288 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824313 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824404 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.531997 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532069 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532093 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532169 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744397 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744505 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248818 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248893 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248923 1598 kuberuntime_manager.go:692] createPodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248993 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883234 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883300 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883327 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883383 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322582 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322652 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322685 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322744 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268197 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268262 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268285 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268374 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [c5fc8cb46441ceaef6bf0347ecae8bb423d4fc849fad64b2afb93b63052026f2] <== * F0724 22:48:28.232635 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/AddonExistsAfterStop]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 
kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (84.238115ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:17:30 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-2jsfl: Type: Secret (a volume populated by a Secret) SecretName: default-token-2jsfl Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997 Warning FailedCreatePodSandBox 34m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 27m (x17 over 31m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to
"cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m26s (x70 over 21m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(ddf5dab0e040cc42e18461726216644f257157271baffe0b4e6c06f634f4a48c): failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found Error from server (NotFound): pods "kubernetes-dashboard-6979c57f4c-wbxrt" not found ** /stderr ** helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 === RUN TestStartStop/group/crio/serial/VerifyKubernetesImages start_stop_delete_test.go:227: (dbg) Run: ./minikube-linux-amd64 ssh -p crio-20200724220901-14997 "sudo crictl images -o json" start_stop_delete_test.go:227: (dbg) Non-zero exit: ./minikube-linux-amd64 ssh -p crio-20200724220901-14997 "sudo crictl images -o json": context deadline exceeded (1.2µs) start_stop_delete_test.go:227: failed to get images inside minikube. args "./minikube-linux-amd64 ssh -p crio-20200724220901-14997 \"sudo crictl images -o json\"": context deadline exceeded start_stop_delete_test.go:227: failed to decode images json: unexpected end of JSON input.
output: start_stop_delete_test.go:227: v1.15.7 images mismatch (-want +got): []string{ - "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "k8s.gcr.io/coredns:1.3.1", - "k8s.gcr.io/etcd:3.3.10", - "k8s.gcr.io/kube-apiserver:v1.15.7", - "k8s.gcr.io/kube-controller-manager:v1.15.7", - "k8s.gcr.io/kube-proxy:v1.15.7", - "k8s.gcr.io/kube-scheduler:v1.15.7", - "k8s.gcr.io/pause:3.1", - "kubernetesui/dashboard:v2.0.1", - "kubernetesui/metrics-scraper:v1.0.4", } helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/VerifyKubernetesImages]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 667119, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:26:07.35920725Z", "FinishedAt": "2020-07-24T22:26:00.416732195Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], 
"DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" }, { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": 
false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32916" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32915" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32914" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32913" } ] }, "SandboxKey": "/var/run/docker/netns/5953f0ce29bf", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/VerifyKubernetesImages FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/VerifyKubernetesImages]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/crio/serial/VerifyKubernetesImages logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:40 UTC. 
-- * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.114414755Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.748254410Z" level=info msg="Attempting to create container: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790302613Z" level=warning msg="requested logPath for ctr id af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9 is a relative path: kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790337215Z" level=warning msg="logPath from relative path is now absolute: /var/log/pods/kube-system_kindnet-4qfcd_9fb35a28-5601-47e4-88e6-be2a18fa55ef/kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949567007Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949602509Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949730918Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036625217Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc NetNS:/proc/52890/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036699122Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036714023Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.067026215Z" level=info msg="Created container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.075803521Z" level=info msg="Started container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=80c4438a-9514-4533-a110-67970c02411d * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108584484Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108632487Z" level=error msg="Error while removing pod from CNI 
network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108687891Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=f28b0fbe-43a9-42fe-a945-e8bf9a55d2a7 * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.510470527Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=78468eb4-799e-459a-86d1-ccda369af23d * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993484870Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993530573Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993660882Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999273169Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb NetNS:/proc/52960/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999325273Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999337074Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064872198Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables 
--help' for more information.\n" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064918701Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064975305Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=7717042a-5394-4708-808f-4e6bbaa1ba7d
* 
* ==> container status <==
* CONTAINER           IMAGE                                                              CREATED          STATE     NAME                      ATTEMPT   POD ID
* af520b21cf962       2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5   3 seconds ago    Running   kindnet-cni               9         22d8d62f2f19c
* c5fc8cb46441c       4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c   3 minutes ago    Exited    storage-provisioner       8         ed36006be83da
* 9b721f7ac557c       2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5   5 minutes ago    Exited    kindnet-cni               8         22d8d62f2f19c
* 4e28cb6807125       ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f   24 minutes ago   Running   kube-proxy                0         8b8f6c79bb715
* 5e2ae26239902       d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2   24 minutes ago   Running   kube-controller-manager   0         f5860ba3909ed
* 9881d2c304cdc       78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367   24 minutes ago   Running   kube-scheduler            0         4b2a96d2f6659
* 7cd3d2c27ce8d       2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d   24 minutes ago   Running   etcd                      0         4187a344080e1
* 60aac29a1caea       c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264   24 minutes ago   Running   kube-apiserver            0         79ca654167789
* 
* ==> describe nodes <==
* Name:               crio-20200724220901-14997
* Roles:              master
* Labels:             beta.kubernetes.io/arch=amd64
*                     beta.kubernetes.io/os=linux
*                     kubernetes.io/arch=amd64
*                     kubernetes.io/hostname=crio-20200724220901-14997
*                     kubernetes.io/os=linux
*                     minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
*                     minikube.k8s.io/name=crio-20200724220901-14997
*                     minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700
*                     minikube.k8s.io/version=v1.12.1
*                     node-role.kubernetes.io/master=
* Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
*                     node.alpha.kubernetes.io/ttl: 0
*                     volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp:  Fri, 24 Jul 2020 22:10:31 +0000
* Taints:
* Unschedulable:      false
* Conditions:
*   Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
*   ----             ------  -----------------                 ------------------                ------                       -------
*   MemoryPressure   False   Fri, 24 Jul 2020 22:51:05 +0000   Fri, 24 Jul 2020 22:10:27 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
*   DiskPressure     False   Fri, 24 Jul 2020 22:51:05 +0000   Fri, 24 Jul 2020 22:10:27 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
*   PIDPressure      False   Fri, 24 Jul 2020 22:51:05 +0000   Fri, 24 Jul 2020 22:10:27 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
*   Ready            True    Fri, 24 Jul 2020 22:51:05 +0000   Fri, 24 Jul 2020 22:10:27 +0000   KubeletReady                 kubelet is posting ready status
* Addresses:
*   InternalIP:  172.17.0.2
*   Hostname:    crio-20200724220901-14997
* Capacity:
*   cpu:                16
*   ephemeral-storage:  128884272Ki
*   hugepages-1Gi:      0
*   hugepages-2Mi:      0
*   memory:             65817044Ki
*   pods:               110
* Allocatable:
*   cpu:                16
*   ephemeral-storage:  128884272Ki
*   hugepages-1Gi:      0
*   hugepages-2Mi:      0
*   memory:             65817044Ki
*   pods:               110
* System Info:
*   Machine ID:                 e7c76c839aa944e99c5c76ea1345e361
*   System UUID:                8677386b-5379-4ccc-90e7-5b585098762e
*   Boot ID:                    65219ec9-ab55-4151-85fa-6cbcd6144529
*   Kernel Version:             5.4.0-1022-azure
*   OS Image:                   Ubuntu 19.10
*   Operating System:           linux
*   Architecture:               amd64
*   Container Runtime Version:  cri-o://1.17.3
*   Kubelet Version:            v1.15.7
*   Kube-Proxy Version:         v1.15.7
* PodCIDR:                      10.244.0.0/24
* Non-terminated Pods:          (11 in total)
*   Namespace             Name                                                CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
*   ---------             ----                                                ------------  ----------  ---------------  -------------  ---
*   default               busybox                                             0 (0%)        0 (0%)      0 (0%)           0 (0%)         34m
*   kube-system           coredns-5d4dd4b4db-9ssg6                            100m (0%)     0 (0%)      70Mi (0%)        170Mi (0%)     40m
*   kube-system           etcd-crio-20200724220901-14997                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         40m
*   kube-system           kindnet-4qfcd                                       100m (0%)     100m (0%)   50Mi (0%)        50Mi (0%)      40m
*   kube-system           kube-apiserver-crio-20200724220901-14997            250m (1%)     0 (0%)      0 (0%)           0 (0%)         40m
*   kube-system           kube-controller-manager-crio-20200724220901-14997   200m (1%)     0 (0%)      0 (0%)           0 (0%)         24m
*   kube-system           kube-proxy-6wf4w                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         40m
*   kube-system           kube-scheduler-crio-20200724220901-14997            100m (0%)     0 (0%)      0 (0%)           0 (0%)         40m
*   kube-system           storage-provisioner                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         40m
*   kubernetes-dashboard  dashboard-metrics-scraper-c8b69c96c-ljbr4           0 (0%)        0 (0%)      0 (0%)           0 (0%)         24m
*   kubernetes-dashboard  kubernetes-dashboard-6979c57f4c-wbxrt               0 (0%)        0 (0%)      0 (0%)           0 (0%)         24m
* Allocated resources:
*   (Total limits may be over 100 percent, i.e., overcommitted.)
*   Resource           Requests    Limits
*   --------           --------    ------
*   cpu                750m (4%)   100m (0%)
*   memory             120Mi (0%)  220Mi (0%)
*   ephemeral-storage  0 (0%)      0 (0%)
* Events:
*   Type     Reason                   Age                From                                    Message
*   ----     ------                   ----               ----                                    -------
*   Normal   NodeHasSufficientMemory  41m (x7 over 41m)  kubelet, crio-20200724220901-14997      Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
*   Normal   NodeHasNoDiskPressure    41m (x7 over 41m)  kubelet, crio-20200724220901-14997      Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
*   Normal   NodeHasSufficientPID     41m (x7 over 41m)  kubelet, crio-20200724220901-14997      Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
*   Warning  readOnlySysFS            40m                kube-proxy, crio-20200724220901-14997   CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
*   Normal   Starting                 40m                kube-proxy, crio-20200724220901-14997   Starting kube-proxy.
*   Normal   Starting                 24m                kubelet, crio-20200724220901-14997      Starting kubelet.
*   Warning  SystemOOM                24m (x2 over 24m)  kubelet, crio-20200724220901-14997      System OOM encountered
*   Normal   NodeHasSufficientMemory  24m (x7 over 24m)  kubelet, crio-20200724220901-14997      Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
*   Normal   NodeHasNoDiskPressure    24m (x7 over 24m)  kubelet, crio-20200724220901-14997      Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
*   Normal   NodeHasSufficientPID     24m (x7 over 24m)  kubelet, crio-20200724220901-14997      Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
*   Normal   NodeAllocatableEnforced  24m                kubelet, crio-20200724220901-14997      Updated Node Allocatable limit across pods
*   Warning  readOnlySysFS            24m                kube-proxy, crio-20200724220901-14997   CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
*   Normal   Starting                 24m                kube-proxy, crio-20200724220901-14997   Starting kube-proxy.
* 
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <== * 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute * 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute * 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute * 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute * 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute * 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute * 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute * 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute * 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute * 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute * 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute * 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute * 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute * 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s * 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 
size:7" took too long (1.514657725s) to execute * 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute * 2020-07-24 22:36:56.295220 I | mvcc: store.index: compact 1090 * 2020-07-24 22:36:56.296281 I | mvcc: finished scheduled compaction at 1090 (took 726.75µs) * 2020-07-24 22:41:56.312125 I | mvcc: store.index: compact 1203 * 2020-07-24 22:41:56.312972 I | mvcc: finished scheduled compaction at 1203 (took 523.537µs) * 2020-07-24 22:46:56.323897 I | mvcc: store.index: compact 1285 * 2020-07-24 22:46:56.324603 I | mvcc: finished scheduled compaction at 1285 (took 389.624µs) * * ==> kernel <== * 22:51:40 up 1:18, 0 users, load average: 0.83, 0.94, 2.73 * Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <== * I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:16.144655 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.144698 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:36.144939 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.145007 1 asm_amd64.s:1337] 
balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.155063 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * * ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <== * I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager * I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller * W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp. * I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal. * I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller * I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller * I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller * I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller * I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server. * I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller * I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller * I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller * I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller * I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller * I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller * I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1 * I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1 * I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4 * I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt * I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota 
controller * I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller * I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller * * ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <== * I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier. * I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7 * I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:10:51.749315 1 config.go:187] Starting service config controller * I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller * I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller * I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller * W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy * I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier. 
* I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7 * I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller * I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:27:02.438814 1 config.go:187] Starting service config controller * I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller * I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller * * ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <== * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory * W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. 
* W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. * W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. * I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7 * I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled * W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled * I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found] * E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, 
clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found] * E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found] * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:41 UTC. -- * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824313 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824404 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.531997 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532069 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network 
sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532093 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532169 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744397 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744505 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248818 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248893 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248923 1598 kuberuntime_manager.go:692] createPodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248993 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883234 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883300 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883327 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883383 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = 
Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322582 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322652 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322685 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322744 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268197 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268262 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: 
could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268285 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268374 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.744063 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * * ==> storage-provisioner [c5fc8cb46441ceaef6bf0347ecae8bb423d4fc849fad64b2afb93b63052026f2] <== * F0724 22:48:28.232635 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt
helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/VerifyKubernetesImages]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (83.394456ms)
-- stdout --
Name:           busybox
Namespace:      default
Priority:       0
Node:           crio-20200724220901-14997/172.17.0.2
Start Time:     Fri, 24 Jul 2020 22:17:30 +0000
Labels:         integration-test=busybox
Annotations:
Status:         Pending
IP:
IPs:
Containers:
  busybox:
    Container ID:
    Image:          busybox:1.28.4-glibc
    Image ID:
    Port:
    Host Port:
    Command:        sleep 3600
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Environment:
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  default-token-2jsfl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-2jsfl
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason                  Age                   From                                 Message
  ----     ------                  ----                  ----                                 -------
  Normal   Scheduled               34m                   default-scheduler                    Successfully assigned default/busybox to crio-20200724220901-14997
  Warning  FailedCreatePodSandBox  34m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  27m (x17 over 31m)    kubelet, crio-20200724220901-14997   (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  24m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message
  Warning  FailedCreatePodSandBox  24m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  24m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  23m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  23m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                   kubelet, crio-20200724220901-14997   Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  4m28s (x70 over 21m)  kubelet, crio-20200724220901-14997   (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(ddf5dab0e040cc42e18461726216644f257157271baffe0b4e6c06f634f4a48c): failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found
Error from server (NotFound): pods "kubernetes-dashboard-6979c57f4c-wbxrt" not found
** /stderr **
helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1
helpers_test.go:215: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/VerifyKubernetesImages]: docker inspect <======
helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997
helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997:
-- stdout --
[ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 667119, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:26:07.35920725Z", "FinishedAt": "2020-07-24T22:26:00.416732195Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ {
"HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2
/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32916" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32915" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32914" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32913" } ] }, "SandboxKey": "/var/run/docker/netns/5953f0ce29bf", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/VerifyKubernetesImages FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/VerifyKubernetesImages]: minikube logs <====== helpers_test.go:240: (dbg) Run: 
./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25
helpers_test.go:245: TestStartStop/group/crio/serial/VerifyKubernetesImages logs:
-- stdout --
* ==> CRI-O <==
* -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:42 UTC. --
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.114414755Z" level=info msg="About to add CNI network crio-bridge (type=bridge)"
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.748254410Z" level=info msg="Attempting to create container: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790302613Z" level=warning msg="requested logPath for ctr id af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9 is a relative path: kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790337215Z" level=warning msg="logPath from relative path is now absolute: /var/log/pods/kube-system_kindnet-4qfcd_9fb35a28-5601-47e4-88e6-be2a18fa55ef/kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949567007Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949602509Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949730918Z" level=info msg="About to del CNI network lo (type=loopback)"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036625217Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc NetNS:/proc/52890/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036699122Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036714023Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.067026215Z" level=info msg="Created container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.075803521Z" level=info msg="Started container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=80c4438a-9514-4533-a110-67970c02411d
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108584484Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108632487Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108687891Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=f28b0fbe-43a9-42fe-a945-e8bf9a55d2a7
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.510470527Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=78468eb4-799e-459a-86d1-ccda369af23d
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993484870Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993530573Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993660882Z" level=info msg="About to del CNI network lo (type=loopback)"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999273169Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb NetNS:/proc/52960/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999325273Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache"
* Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999337074Z" level=info msg="About to del CNI network crio-bridge (type=bridge)"
* Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064872198Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064918701Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n"
* Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064975305Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=7717042a-5394-4708-808f-4e6bbaa1ba7d
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
* af520b21cf962 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 5 seconds ago Running kindnet-cni 9 22d8d62f2f19c
* c5fc8cb46441c 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 3 minutes ago Exited storage-provisioner 8 ed36006be83da
* 9b721f7ac557c 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 5 minutes ago Exited kindnet-cni 8 22d8d62f2f19c
* 4e28cb6807125 ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 24 minutes ago Running kube-proxy 0 8b8f6c79bb715
* 5e2ae26239902 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 24 minutes ago Running kube-controller-manager 0 f5860ba3909ed
* 9881d2c304cdc 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 24 minutes ago Running kube-scheduler 0 4b2a96d2f6659
* 7cd3d2c27ce8d 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 24 minutes ago Running etcd 0 4187a344080e1
* 60aac29a1caea c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 24 minutes ago Running kube-apiserver 0 79ca654167789
*
* ==> describe nodes <==
* Name: crio-20200724220901-14997
* Roles: master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=crio-20200724220901-14997
* kubernetes.io/os=linux
* minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
* minikube.k8s.io/name=crio-20200724220901-14997
* minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700
* minikube.k8s.io/version=v1.12.1
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock
* node.alpha.kubernetes.io/ttl: 0
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000
* Taints:
* Unschedulable: false
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 172.17.0.2
* Hostname: crio-20200724220901-14997
* Capacity:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* Allocatable:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* System Info:
* Machine ID: e7c76c839aa944e99c5c76ea1345e361
* System UUID: 8677386b-5379-4ccc-90e7-5b585098762e
* Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529
* Kernel Version: 5.4.0-1022-azure
* OS Image: Ubuntu 19.10
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: cri-o://1.17.3
* Kubelet Version: v1.15.7
* Kube-Proxy Version: v1.15.7
* PodCIDR: 10.244.0.0/24
* Non-terminated Pods: (11 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m
* kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m
* kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m
* kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 24m
* kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kubernetes-dashboard dashboard-metrics-scraper-c8b69c96c-ljbr4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m
* kubernetes-dashboard kubernetes-dashboard-6979c57f4c-wbxrt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 750m (4%) 100m (0%)
* memory 120Mi (0%) 220Mi (0%)
* ephemeral-storage 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasSufficientMemory 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Warning readOnlySysFS 40m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 40m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
* Normal Starting 24m kubelet, crio-20200724220901-14997 Starting kubelet.
* Warning SystemOOM 24m (x2 over 24m) kubelet, crio-20200724220901-14997 System OOM encountered
* Normal NodeHasSufficientMemory 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 24m kubelet, crio-20200724220901-14997 Updated Node Allocatable limit across pods
* Warning readOnlySysFS 24m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 24m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
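[Editor's sketch] The node report above is captured by the test harness shelling out to kubectl; a minimal Go sketch of that same pattern (hypothetical, not part of helpers_test.go; the context and node name are taken from the log above) would be:

    // Hypothetical sketch: re-run the node post-mortem capture the way the
    // harness does, by shelling out to kubectl and printing combined output.
    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("kubectl", "--context", "crio-20200724220901-14997",
            "describe", "node", "crio-20200724220901-14997")
        out, err := cmd.CombinedOutput()
        if err != nil {
            fmt.Printf("describe node failed: %v\n", err)
        }
        fmt.Print(string(out))
    }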
*
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <==
* 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute
* 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute
* 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute
* 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute
* 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute
* 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute
* 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute
* 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute
* 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute
* 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute
* 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute
* 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute
* 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute
* 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s
* 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.514657725s) to execute
* 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute
* 2020-07-24 22:36:56.295220 I | mvcc: store.index: compact 1090
* 2020-07-24 22:36:56.296281 I | mvcc: finished scheduled compaction at 1090 (took 726.75µs)
* 2020-07-24 22:41:56.312125 I | mvcc: store.index: compact 1203
* 2020-07-24 22:41:56.312972 I | mvcc: finished scheduled compaction at 1203 (took 523.537µs)
* 2020-07-24 22:46:56.323897 I | mvcc: store.index: compact 1285
* 2020-07-24 22:46:56.324603 I | mvcc: finished scheduled compaction at 1285 (took 389.624µs)
*
* ==> kernel <==
* 22:51:42 up 1:19, 0 users, load average: 0.84, 0.94, 2.72
* Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <==
* I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:33:16.144655 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.144698 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:33:36.144939 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.145007 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.155063 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
*
* ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <==
* I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager
* I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller
* W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp.
* I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal.
* I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller
* I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller
* I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller
* I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller
* I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server.
* I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller
* I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller
* I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller
* I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller
* I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller
* I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller
* I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1
* I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1
* I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4
* I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt
* I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller
* I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
* I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller
* I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller
*
* ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <==
* I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier.
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7
* I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:10:51.749315 1 config.go:187] Starting service config controller
* I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller
* I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller
* I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
* I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller
* I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller
* W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy
* I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier.
* I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7
* I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller
* I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
* I0724 22:27:02.438814 1 config.go:187] Starting service config controller
* I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller
* I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller
* I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller
*
* ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <==
* E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
* E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
* E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
* E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
* E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
* I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory
* W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
* W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
* W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
* I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7
* I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory
* W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled
* W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled
* I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251
* I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259
* E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
* E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found]
* E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
* E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
* E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found]
* E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found]
* E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found]
* E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found]
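[Editor's sketch] The reflector errors above are an RBAC startup race: the scheduler lists resources before its clusterroles exist. A hedged Go sketch of how one might probe the same grant with kubectl auth can-i (hypothetical; not part of the test suite) follows:

    // Hypothetical check: verify the grant the "nodes is forbidden" error above
    // complains about, impersonating the scheduler identity from the log.
    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("kubectl", "--context", "crio-20200724220901-14997",
            "auth", "can-i", "list", "nodes", "--as", "system:kube-scheduler")
        out, _ := cmd.CombinedOutput() // prints "yes" or "no"; exits non-zero on "no"
        fmt.Print(string(out))
    }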
*
* ==> kubelet <==
* -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:42 UTC. --
* Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824313 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824404 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.531997 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532069 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532093 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532169 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744397 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"
* Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744505 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"
* Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248818 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248893 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248923 1598 kuberuntime_manager.go:692] createPodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248993 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883234 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883300 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883327 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883383 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322582 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322652 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322685 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322744 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268197 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268262 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268285 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268374 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.744063 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"
*
* ==> storage-provisioner [c5fc8cb46441ceaef6bf0347ecae8bb423d4fc849fad64b2afb93b63052026f2] <==
* F0724 22:48:28.232635 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997
helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt
helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/VerifyKubernetesImages]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (84.028501ms)
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Node: crio-20200724220901-14997/172.17.0.2
Start Time: Fri, 24 Jul 2020 22:17:30 +0000
Labels: integration-test=busybox
Annotations:
Status: Pending
IP:
IPs:
Containers:
busybox:
Container ID:
Image: busybox:1.28.4-glibc
Image ID:
Port:
Host Port:
Command: sleep 3600
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
default-token-2jsfl:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-2jsfl
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997
Warning FailedCreatePodSandBox 34m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 27m (x17 over 31m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 4m30s (x70 over 22m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(ddf5dab0e040cc42e18461726216644f257157271baffe0b4e6c06f634f4a48c): failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found
Error from server (NotFound): pods
"kubernetes-dashboard-6979c57f4c-wbxrt" not found ** /stderr ** helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 === RUN TestStartStop/group/crio/serial/Pause start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 pause -p crio-20200724220901-14997 --alsologtostderr -v=1 start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 pause -p crio-20200724220901-14997 --alsologtostderr -v=1: context deadline exceeded (1.2µs) start_stop_delete_test.go:233: ./minikube-linux-amd64 pause -p crio-20200724220901-14997 --alsologtostderr -v=1 failed: context deadline exceeded helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/Pause]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 667119, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:26:07.35920725Z", "FinishedAt": "2020-07-24T22:26:00.416732195Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 
0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, 
"Propagation": "" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32916" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32915" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32914" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32913" } ] }, "SandboxKey": "/var/run/docker/netns/5953f0ce29bf", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/Pause FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/Pause]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/crio/serial/Pause logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:44 UTC. 
-- * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.114414755Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.748254410Z" level=info msg="Attempting to create container: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790302613Z" level=warning msg="requested logPath for ctr id af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9 is a relative path: kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.790337215Z" level=warning msg="logPath from relative path is now absolute: /var/log/pods/kube-system_kindnet-4qfcd_9fb35a28-5601-47e4-88e6-be2a18fa55ef/kindnet-cni/9.log" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949567007Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949602509Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949730918Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036625217Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc NetNS:/proc/52890/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036699122Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036714023Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.067026215Z" level=info msg="Created container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.075803521Z" level=info msg="Started container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=80c4438a-9514-4533-a110-67970c02411d * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108584484Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108632487Z" level=error msg="Error while removing pod from CNI 
network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108687891Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=f28b0fbe-43a9-42fe-a945-e8bf9a55d2a7 * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.510470527Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=78468eb4-799e-459a-86d1-ccda369af23d * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993484870Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993530573Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993660882Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999273169Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb NetNS:/proc/52960/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999325273Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999337074Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064872198Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables 
--help' for more information.\n" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064918701Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064975305Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=7717042a-5394-4708-808f-4e6bbaa1ba7d * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * af520b21cf962 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 7 seconds ago Running kindnet-cni 9 22d8d62f2f19c * c5fc8cb46441c 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 3 minutes ago Exited storage-provisioner 8 ed36006be83da * 9b721f7ac557c 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 5 minutes ago Exited kindnet-cni 8 22d8d62f2f19c * 4e28cb6807125 ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 24 minutes ago Running kube-proxy 0 8b8f6c79bb715 * 5e2ae26239902 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 24 minutes ago Running kube-controller-manager 0 f5860ba3909ed * 9881d2c304cdc 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 24 minutes ago Running kube-scheduler 0 4b2a96d2f6659 * 7cd3d2c27ce8d 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 24 minutes ago Running etcd 0 4187a344080e1 * 60aac29a1caea c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 24 minutes ago Running kube-apiserver 0 79ca654167789 * * ==> describe nodes <== * Name: crio-20200724220901-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=crio-20200724220901-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=crio-20200724220901-14997 * minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000 * Taints: <none> * Unschedulable: false * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000
KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.2 * Hostname: crio-20200724220901-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: e7c76c839aa944e99c5c76ea1345e361 * System UUID: 8677386b-5379-4ccc-90e7-5b585098762e * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: cri-o://1.17.3 * Kubelet Version: v1.15.7 * Kube-Proxy Version: v1.15.7 * PodCIDR: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m * kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m * kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m * kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 24m * kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kubernetes-dashboard dashboard-metrics-scraper-c8b69c96c-ljbr4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m * kubernetes-dashboard kubernetes-dashboard-6979c57f4c-wbxrt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 40m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 40m kube-proxy, crio-20200724220901-14997 Starting kube-proxy. * Normal Starting 24m kubelet, crio-20200724220901-14997 Starting kubelet. 
* Warning SystemOOM 24m (x2 over 24m) kubelet, crio-20200724220901-14997 System OOM encountered * Normal NodeHasSufficientMemory 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 24m kubelet, crio-20200724220901-14997 Updated Node Allocatable limit across pods * Warning readOnlySysFS 24m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 24m kube-proxy, crio-20200724220901-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <== * 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found") * 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute * 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute * 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute * 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute * 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute * 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute * 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute * 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute * 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute * 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute * 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute * 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute * 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute * 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s * 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 
size:7" took too long (1.514657725s) to execute * 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute * 2020-07-24 22:36:56.295220 I | mvcc: store.index: compact 1090 * 2020-07-24 22:36:56.296281 I | mvcc: finished scheduled compaction at 1090 (took 726.75µs) * 2020-07-24 22:41:56.312125 I | mvcc: store.index: compact 1203 * 2020-07-24 22:41:56.312972 I | mvcc: finished scheduled compaction at 1203 (took 523.537µs) * 2020-07-24 22:46:56.323897 I | mvcc: store.index: compact 1285 * 2020-07-24 22:46:56.324603 I | mvcc: finished scheduled compaction at 1285 (took 389.624µs) * * ==> kernel <== * 22:51:44 up 1:19, 0 users, load average: 0.84, 0.94, 2.72 * Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <== * I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:16.144655 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.144698 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [] * I0724 22:33:36.144939 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.145007 1 asm_amd64.s:1337] 
balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * I0724 22:33:36.155063 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }] * * ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <== * I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager * I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller * W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp. * I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal. * I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller * I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller * I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller * I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller * I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server. * I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller * I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller * I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller * I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller * I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller * I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller * I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1 * I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1 * I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4 * I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt * I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota 
controller * I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller * I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage * I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller * I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller * I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller * * ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <== * I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier. * I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7 * I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:10:51.749315 1 config.go:187] Starting service config controller * I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller * I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller * I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller * W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy * I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier. 
* I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7 * I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller * I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:27:02.438814 1 config.go:187] Starting service config controller * I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller * I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller * * ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <== * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory * W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. 
* W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. * W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. * I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7 * I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled * W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled * I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found] * E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, 
clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found] * E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found] * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:44 UTC. -- * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824313 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824404 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.531997 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532069 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network 
sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532093 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532169 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744397 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744505 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248818 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248893 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248923 1598 kuberuntime_manager.go:692] createPodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248993 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883234 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883300 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883327 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883383 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = 
Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322582 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322652 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322685 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322744 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268197 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268262 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: 
could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268285 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268374 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.744063 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * * ==> storage-provisioner [c5fc8cb46441ceaef6bf0347ecae8bb423d4fc849fad64b2afb93b63052026f2] <== * F0724 22:48:28.232635 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/Pause]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (82.780621ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:17:30 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: 
busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-2jsfl: Type: Secret (a volume populated by a Secret) SecretName: default-token-2jsfl Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997 Warning FailedCreatePodSandBox 34m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network 
sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 27m (x17 over 32m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP 
address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m32s (x70 over 22m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(ddf5dab0e040cc42e18461726216644f257157271baffe0b4e6c06f634f4a48c): failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found Error from server (NotFound): pods "kubernetes-dashboard-6979c57f4c-wbxrt" not found ** /stderr ** helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/crio/serial/Pause]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect crio-20200724220901-14997 helpers_test.go:228: (dbg) docker inspect crio-20200724220901-14997: -- stdout -- [ { "Id": "d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a", "Created": "2020-07-24T22:09:11.178770681Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 667119, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:26:07.35920725Z", "FinishedAt": "2020-07-24T22:26:00.416732195Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/resolv.conf", "HostnamePath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hostname", "HostsPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/hosts", "LogPath": "/var/lib/docker/containers/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a/d5ad054c9d1682a085b5564e8cba10a0f4046f4c096659855494ef3270ba8f6a-json.log", "Name": "/crio-20200724220901-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "crio-20200724220901-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": 
"127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960c
aef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/merged", "UpperDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/diff", "WorkDir": "/var/lib/docker/overlay2/25235e8a657907fa460b6393f79aab4ea109b0924e4843714a3776886a051c03/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "crio-20200724220901-14997", "Source": "/var/lib/docker/volumes/crio-20200724220901-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "crio-20200724220901-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8443/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "crio-20200724220901-14997", "name.minikube.sigs.k8s.io": "crio-20200724220901-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "5953f0ce29bf68bb7146ac5384ca1a6be3ccb39427dcde4428b82a503b037325", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32916" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32915" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32914" } ], "8443/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32913" } ] }, "SandboxKey": "/var/run/docker/netns/5953f0ce29bf", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:02", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "280707bfa744faabf6b6bcfee77f56c3d14c11c8d8070fd9a4796c96665833f5", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:02", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:237: <<< TestStartStop/group/crio/serial/Pause FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/crio/serial/Pause]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p crio-20200724220901-14997 logs 
-n 25 helpers_test.go:245: TestStartStop/group/crio/serial/Pause logs: -- stdout -- * ==> CRI-O <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:45 UTC. -- * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949567007Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949602509Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:36 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:36.949730918Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036625217Z" level=info msg="Got pod network &{Name:coredns-5d4dd4b4db-9ssg6 Namespace:kube-system ID:c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc NetNS:/proc/52890/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036699122Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.036714023Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.067026215Z" level=info msg="Created container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=ce94fdb6-1741-49d4-a097-efa6bf9201b9 * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.075803521Z" level=info msg="Started container af520b21cf962cfe633916631b6dfff80514346fa3f77e21c279c048e3c47ce9: kube-system/kindnet-4qfcd/kindnet-cni" id=80c4438a-9514-4533-a110-67970c02411d * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108584484Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108632487Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.108687891Z" level=error msg="error stopping network on cleanup: failed to destroy network for pod sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): running 
[/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.215 -j CNI-9aa498ed525f8c8017ed6801 -m comment --comment name: \"crio-bridge\" id: \"c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-9aa498ed525f8c8017ed6801':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=f28b0fbe-43a9-42fe-a945-e8bf9a55d2a7 * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.510470527Z" level=info msg="exec'd [/bin/sh -ec ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 --cacert=/var/lib/minikube/certs/etcd/ca.crt --cert=/var/lib/minikube/certs/etcd/healthcheck-client.crt --key=/var/lib/minikube/certs/etcd/healthcheck-client.key get foo] in kube-system/etcd-crio-20200724220901-14997/etcd" id=78468eb4-799e-459a-86d1-ccda369af23d * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993484870Z" level=error msg="Error adding network: failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993530573Z" level=error msg="Error while adding pod to CNI network \"crio-bridge\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.993660882Z" level=info msg="About to del CNI network lo (type=loopback)" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999273169Z" level=info msg="Got pod network &{Name:dashboard-metrics-scraper-c8b69c96c-ljbr4 Namespace:kubernetes-dashboard ID:b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb NetNS:/proc/52960/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999325273Z" level=error msg="error loading cached network config: network \"crio-bridge\" not found in CNI cache" * Jul 24 22:51:37 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:37.999337074Z" level=info msg="About to del CNI network crio-bridge (type=bridge)" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064872198Z" level=error msg="Error deleting network: running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064918701Z" level=error msg="Error while removing pod from CNI network \"crio-bridge\": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" * Jul 24 22:51:38 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:38.064975305Z" level=error msg="error stopping network on cleanup: failed to destroy 
network for pod sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.244.1.216 -j CNI-db244da69fafc09d440b57a8 -m comment --comment name: \"crio-bridge\" id: \"b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb\" --wait]: exit status 2: iptables v1.8.3 (legacy): Couldn't load target `CNI-db244da69fafc09d440b57a8':No such file or directory\n\nTry `iptables -h' or 'iptables --help' for more information.\n" id=7717042a-5394-4708-808f-4e6bbaa1ba7d * Jul 24 22:51:44 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:44.744091853Z" level=info msg="attempting to run pod sandbox with infra container: default/busybox/POD" id=86320b9a-74c7-4ff7-97c6-53c9f80f2393 * Jul 24 22:51:44 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:44.898776031Z" level=info msg="About to add CNI network lo (type=loopback)" * Jul 24 22:51:44 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:44.903244845Z" level=info msg="Got pod network &{Name:busybox Namespace:default ID:f82aa4e93efc376633fea1704b12cfcb7f4515efdadd0a54946936db7dd733ec NetNS:/proc/54015/ns/net Networks:[] RuntimeConfig:map[crio-bridge:{IP: MAC: PortMappings:[] Bandwidth: IpRanges:[]}]}" * Jul 24 22:51:44 crio-20200724220901-14997 crio[3547]: time="2020-07-24 22:51:44.903279347Z" level=info msg="About to add CNI network crio-bridge (type=bridge)" * * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * af520b21cf962 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 8 seconds ago Running kindnet-cni 9 22d8d62f2f19c * c5fc8cb46441c 4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c 3 minutes ago Exited storage-provisioner 8 ed36006be83da * 9b721f7ac557c 2186a1a396deb58f1ea5eaf20193a518ca05049b46ccd754ec83366b5c8c13d5 5 minutes ago Exited kindnet-cni 8 22d8d62f2f19c * 4e28cb6807125 ae3d9889423ede337df3814baa77445e566597a5a882f3cdf933b4d9e0025f0f 24 minutes ago Running kube-proxy 0 8b8f6c79bb715 * 5e2ae26239902 d2f090f2479fbf92c508100e0a6106b3516bb70421a465586661feb1494145a2 24 minutes ago Running kube-controller-manager 0 f5860ba3909ed * 9881d2c304cdc 78b4180ab00d0fb99b1be2b5ef92a4831ad07f00f27e6746828f374497d79367 24 minutes ago Running kube-scheduler 0 4b2a96d2f6659 * 7cd3d2c27ce8d 2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d 24 minutes ago Running etcd 0 4187a344080e1 * 60aac29a1caea c500a024ff843278184e5454ff6ee040a106c867c5a0361886fd3057cace2264 24 minutes ago Running kube-apiserver 0 79ca654167789 * * ==> describe nodes <== * Name: crio-20200724220901-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=crio-20200724220901-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=crio-20200724220901-14997 * minikube.k8s.io/updated_at=2020_07_24T22_10_37_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/crio/crio.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:10:31 +0000 * Taints: * Unschedulable: false * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- 
------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:51:05 +0000 Fri, 24 Jul 2020 22:10:27 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.2 * Hostname: crio-20200724220901-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: e7c76c839aa944e99c5c76ea1345e361 * System UUID: 8677386b-5379-4ccc-90e7-5b585098762e * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: cri-o://1.17.3 * Kubelet Version: v1.15.7 * Kube-Proxy Version: v1.15.7 * PodCIDR: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m * kube-system coredns-5d4dd4b4db-9ssg6 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m * kube-system etcd-crio-20200724220901-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kindnet-4qfcd 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m * kube-system kube-apiserver-crio-20200724220901-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-controller-manager-crio-20200724220901-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 24m * kube-system kube-proxy-6wf4w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-scheduler-crio-20200724220901-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kubernetes-dashboard dashboard-metrics-scraper-c8b69c96c-ljbr4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m * kubernetes-dashboard kubernetes-dashboard-6979c57f4c-wbxrt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasSufficientMemory 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m (x7 over 41m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 40m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 40m kube-proxy, crio-20200724220901-14997 Starting kube-proxy. * Normal Starting 24m kubelet, crio-20200724220901-14997 Starting kubelet. 
* Warning SystemOOM 24m (x2 over 24m) kubelet, crio-20200724220901-14997 System OOM encountered
* Normal NodeHasSufficientMemory 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 24m (x7 over 24m) kubelet, crio-20200724220901-14997 Node crio-20200724220901-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 24m kubelet, crio-20200724220901-14997 Updated Node Allocatable limit across pods
* Warning readOnlySysFS 24m kube-proxy, crio-20200724220901-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 24m kube-proxy, crio-20200724220901-14997 Starting kube-proxy.
*
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [7cd3d2c27ce8d87211660e16ef5baac83ffc42799754d9ad03e5cf733dcd820c] <==
* 2020-07-24 22:28:20.693526 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.693537 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.693546 W | etcdserver: failed to revoke 3c247382efb5110a ("lease not found")
* 2020-07-24 22:28:20.715917 W | etcdserver: read-only range request "key:\"foo\" " with result "range_response_count:0 size:5" took too long (3.215685555s) to execute
* 2020-07-24 22:28:20.715967 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (3.467781535s) to execute
* 2020-07-24 22:28:20.715982 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (1.594353687s) to execute
* 2020-07-24 22:28:20.716160 W | etcdserver: read-only range request "key:\"/registry/statefulsets\" range_end:\"/registry/statefulsett\" count_only:true " with result "range_response_count:0 size:5" took too long (2.909137077s) to execute
* 2020-07-24 22:28:20.716193 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/storage-provisioner\" " with result "range_response_count:1 size:2416" took too long (4.129795802s) to execute
* 2020-07-24 22:28:20.716213 W | etcdserver: read-only range request "key:\"/registry/horizontalpodautoscalers\" range_end:\"/registry/horizontalpodautoscalert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.658155312s) to execute
* 2020-07-24 22:28:20.716294 W | etcdserver: read-only range request "key:\"/registry/services/specs/default/kubernetes\" " with result "range_response_count:1 size:293" took too long (4.130839176s) to execute
* 2020-07-24 22:28:20.716427 W | etcdserver: read-only range request "key:\"/registry/clusterroles\" range_end:\"/registry/clusterrolet\" count_only:true " with result "range_response_count:0 size:7" took too long (1.282647649s) to execute
* 2020-07-24 22:28:20.844313 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0b10da12b9b\" " with result "range_response_count:1 size:483" took too long (122.063812ms) to execute
* 2020-07-24 22:28:20.950803 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:213" took too long (105.11143ms) to execute
* 2020-07-24 22:31:03.116431 W | etcdserver: read-only range request "key:\"/registry/jobs/\" range_end:\"/registry/jobs0\" limit:500 " with result "range_response_count:0 size:5" took too long (2.348768413s) to execute
* 2020-07-24 22:31:03.116531 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (2.159753959s) to execute
* 2020-07-24 22:31:03.117809 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000019975s) to execute
* 2020-07-24 22:31:03.118930 W | wal: sync duration of 2.16222982s, expected less than 1s
* 2020-07-24 22:31:03.119141 W | etcdserver: read-only range request "key:\"/registry/configmaps\" range_end:\"/registry/configmapt\" count_only:true " with result "range_response_count:0 size:7" took too long (1.514657725s) to execute
* 2020-07-24 22:31:03.119390 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:172" took too long (1.472246821s) to execute
* 2020-07-24 22:36:56.295220 I | mvcc: store.index: compact 1090
* 2020-07-24 22:36:56.296281 I | mvcc: finished scheduled compaction at 1090 (took 726.75µs)
* 2020-07-24 22:41:56.312125 I | mvcc: store.index: compact 1203
* 2020-07-24 22:41:56.312972 I | mvcc: finished scheduled compaction at 1203 (took 523.537µs)
* 2020-07-24 22:46:56.323897 I | mvcc: store.index: compact 1285
* 2020-07-24 22:46:56.324603 I | mvcc: finished scheduled compaction at 1285 (took 389.624µs)
*
* ==> kernel <==
* 22:51:46 up 1:19, 0 users, load average: 0.78, 0.93, 2.71
* Linux crio-20200724220901-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [60aac29a1caeafa329981bf8f9a73c7f579633e998d2e82caabedfba3f1d1138] <==
* I0724 22:32:16.144038 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.144097 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.144148 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:16.152885 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144044 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:32:36.144250 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144381 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144412 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.144424 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:36.153545 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144278 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:32:56.144441 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144512 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144567 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.144612 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:32:56.156564 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144467 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:33:16.144655 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144710 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.144738 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:16.154631 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.144698 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: []
* I0724 22:33:36.144939 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.145007 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
* I0724 22:33:36.155063 1 asm_amd64.s:1337] balancerWrapper: got update addr from Notify: [{127.0.0.1:2379 }]
*
* ==> kube-controller-manager [5e2ae262399021f9b26d8e88b478a4ff629d1bc4dffef0901936b18cc1e4548c] <==
* I0724 22:27:16.476945 1 taint_manager.go:182] Starting NoExecuteTaintManager
* I0724 22:27:16.476964 1 event.go:258] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"crio-20200724220901-14997", UID:"ef0f0ad1-a556-4ea4-8b4e-4df0035f7a0b", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node crio-20200724220901-14997 event: Registered Node crio-20200724220901-14997 in Controller
* W0724 22:27:16.476984 1 node_lifecycle_controller.go:863] Missing timestamp for Node crio-20200724220901-14997. Assuming now as a timestamp.
* I0724 22:27:16.477018 1 node_lifecycle_controller.go:1089] Controller detected that zone is now in state Normal.
* I0724 22:27:16.496120 1 controller_utils.go:1036] Caches are synced for daemon sets controller
* I0724 22:27:16.517177 1 controller_utils.go:1036] Caches are synced for endpoint controller
* I0724 22:27:16.522134 1 controller_utils.go:1036] Caches are synced for ReplicationController controller
* I0724 22:27:16.526074 1 controller_utils.go:1036] Caches are synced for disruption controller
* I0724 22:27:16.526095 1 disruption.go:338] Sending events to api server.
* I0724 22:27:16.526636 1 controller_utils.go:1036] Caches are synced for job controller
* I0724 22:27:16.526835 1 controller_utils.go:1036] Caches are synced for PVC protection controller
* I0724 22:27:16.531393 1 controller_utils.go:1036] Caches are synced for attach detach controller
* I0724 22:27:16.540016 1 controller_utils.go:1036] Caches are synced for persistent volume controller
* I0724 22:27:16.551348 1 controller_utils.go:1036] Caches are synced for ReplicaSet controller
* I0724 22:27:16.577221 1 controller_utils.go:1036] Caches are synced for deployment controller
* I0724 22:27:16.580595 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard", UID:"db313703-4ad6-497b-8b1b-c7657766a8c5", APIVersion:"apps/v1", ResourceVersion:"821", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kubernetes-dashboard-6979c57f4c to 1
* I0724 22:27:16.584728 1 event.go:258] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper", UID:"a8d0d7cc-7814-4653-af48-855bd4001c27", APIVersion:"apps/v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set dashboard-metrics-scraper-c8b69c96c to 1
* I0724 22:27:16.589419 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-c8b69c96c", UID:"79acf5d3-9c82-447f-b71d-910de5b42f1b", APIVersion:"apps/v1", ResourceVersion:"855", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-c8b69c96c-ljbr4
* I0724 22:27:16.589458 1 event.go:258] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6979c57f4c", UID:"b43829eb-0088-472e-9584-6679aa6b72c4", APIVersion:"apps/v1", ResourceVersion:"854", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6979c57f4c-wbxrt
* I0724 22:27:16.589508 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:27:16.635535 1 controller_utils.go:1036] Caches are synced for garbage collector controller
* I0724 22:27:16.635571 1 garbagecollector.go:137] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
* I0724 22:27:16.640852 1 controller_utils.go:1036] Caches are synced for resource quota controller
* I0724 22:27:17.230884 1 controller_utils.go:1029] Waiting for caches to sync for garbage collector controller
* I0724 22:27:17.331259 1 controller_utils.go:1036] Caches are synced for garbage collector controller
*
* ==> kube-proxy [4e28cb6807125f410044e3fce6354030e93abbb1dd7a5b0efeb136ab9f75cc6f] <==
* I0724 22:10:51.711236 1 server_others.go:143] Using iptables Proxier.
* I0724 22:10:51.711733 1 server.go:534] Version: v1.15.7
* I0724 22:10:51.747623 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:10:51.748800 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:10:51.749038 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:10:51.749127 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:10:51.749315 1 config.go:187] Starting service config controller
* I0724 22:10:51.749352 1 controller_utils.go:1029] Waiting for caches to sync for service config controller
* I0724 22:10:51.749404 1 config.go:96] Starting endpoints config controller
* I0724 22:10:51.749502 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
* I0724 22:10:51.849555 1 controller_utils.go:1036] Caches are synced for service config controller
* I0724 22:10:51.849731 1 controller_utils.go:1036] Caches are synced for endpoints config controller
* W0724 22:27:02.252143 1 server_others.go:249] Flag proxy-mode="" unknown, assuming iptables proxy
* I0724 22:27:02.336837 1 server_others.go:143] Using iptables Proxier.
* I0724 22:27:02.337285 1 server.go:534] Version: v1.15.7 * I0724 22:27:02.437683 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:27:02.438093 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:27:02.438352 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:27:02.438437 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:27:02.438748 1 config.go:96] Starting endpoints config controller * I0724 22:27:02.438828 1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller * I0724 22:27:02.438814 1 config.go:187] Starting service config controller * I0724 22:27:02.438854 1 controller_utils.go:1029] Waiting for caches to sync for service config controller * I0724 22:27:02.539002 1 controller_utils.go:1036] Caches are synced for endpoints config controller * I0724 22:27:02.539024 1 controller_utils.go:1036] Caches are synced for service config controller * * ==> kube-scheduler [9881d2c304cdca9fe0f38468be746e289b9fa419917792f7aa9019077b4a4374] <== * E0724 22:10:32.938093 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:10:32.940907 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:10:32.940967 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope * E0724 22:10:32.941768 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:10:32.942905 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope * E0724 22:10:32.943936 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope * E0724 22:10:32.947924 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope * I0724 22:26:55.290417 1 serving.go:319] Generated self-signed cert in-memory * W0724 22:26:55.521509 1 authentication.go:249] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work. 
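The bursts of reflector errors around the restarts here and below ("cannot list resource ... at the cluster scope", later with "clusterrole ... not found" RBAC details) look like a startup race: the restarted scheduler begins its list/watch before the apiserver has finished bootstrapping the default cluster roles, and the errors stop once RBAC is in place. If they kept recurring, a check along these lines would localize it (context name taken from this run; a sketch, not part of the test suite):

    # Do the default scheduler RBAC objects exist?
    kubectl --context crio-20200724220901-14997 get clusterrole system:kube-scheduler
    kubectl --context crio-20200724220901-14997 get clusterrolebinding system:kube-scheduler

    # Ask the apiserver directly whether the scheduler user may list nodes.
    kubectl --context crio-20200724220901-14997 auth can-i list nodes --as=system:kube-scheduler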
* W0724 22:26:55.521537 1 authentication.go:252] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work. * W0724 22:26:55.521549 1 authorization.go:146] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work. * I0724 22:26:55.524427 1 server.go:142] Version: v1.15.7 * I0724 22:26:55.524493 1 defaults.go:87] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory * W0724 22:26:55.525401 1 authorization.go:47] Authorization is disabled * W0724 22:26:55.525419 1 authentication.go:55] Authentication is disabled * I0724 22:26:55.525435 1 deprecated_insecure_serving.go:51] Serving healthz insecurely on [::]:10251 * I0724 22:26:55.535873 1 secure_serving.go:116] Serving securely on 127.0.0.1:10259 * E0724 22:27:00.040558 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope * E0724 22:27:00.140295 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found] * E0724 22:27:00.140491 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope * E0724 22:27:00.140663 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope * E0724 22:27:00.140655 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140755 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, 
clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found] * E0724 22:27:00.140806 1 reflector.go:125] k8s.io/client-go/informers/factory.go:133: Failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found, clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found] * E0724 22:27:00.140869 1 reflector.go:125] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:226: Failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope: RBAC: [clusterrole.rbac.authorization.k8s.io "system:basic-user" not found, clusterrole.rbac.authorization.k8s.io "system:volume-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:public-info-viewer" not found, clusterrole.rbac.authorization.k8s.io "system:kube-scheduler" not found, clusterrole.rbac.authorization.k8s.io "system:discovery" not found] * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:26:07 UTC, end at Fri 2020-07-24 22:51:46 UTC. -- * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824313 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:22 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:22.824404 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(5640d9f74f19b787273675f6175658f5a4b944f93df8a5e82447b8adc4afddff): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.531997 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532069 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network 
sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532093 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:24 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:24.532169 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(25492f81a77c03ed1561da5355054094967a64c28fd4b6fe0b3c3ae3e2148fee): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744397 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * Jul 24 22:51:25 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:25.744505 1598 pod_workers.go:190] Error syncing pod 9fb35a28-5601-47e4-88e6-be2a18fa55ef ("kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-4qfcd_kube-system(9fb35a28-5601-47e4-88e6-be2a18fa55ef)" * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248818 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248893 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248923 1598 kuberuntime_manager.go:692] createPodSandbox for pod "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:32 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:32.248993 1598 pod_workers.go:190] Error syncing pod 2734d7c0-daef-420d-966b-49f33f94e547 ("kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard(2734d7c0-daef-420d-966b-49f33f94e547)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_kubernetes-dashboard-6979c57f4c-wbxrt_kubernetes-dashboard_2734d7c0-daef-420d-966b-49f33f94e547_0(9ccf0f0cce6b567ad2f291f016191bb44502de1d3e86e6df90c6e7576ed5ab81): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883234 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883300 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883327 1598 kuberuntime_manager.go:692] createPodSandbox for pod "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:33 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:33.883383 1598 pod_workers.go:190] Error syncing pod b947226f-0b0e-4916-9b65-c7b70a6e137e ("busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)"), skipping: failed to "CreatePodSandbox" for "busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(b947226f-0b0e-4916-9b65-c7b70a6e137e)\" failed: rpc error: code = 
Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(86aa6f798e8cfd97aef81555b92613e818d4c7173384c653d026f84ec976db8e): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322582 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322652 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322685 1598 kuberuntime_manager.go:692] createPodSandbox for pod "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:37 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:37.322744 1598 pod_workers.go:190] Error syncing pod dbd8dc2c-1af2-4c99-89f0-d44686805742 ("coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)"), skipping: failed to "CreatePodSandbox" for "coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-5d4dd4b4db-9ssg6_kube-system(dbd8dc2c-1af2-4c99-89f0-d44686805742)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_coredns-5d4dd4b4db-9ssg6_kube-system_dbd8dc2c-1af2-4c99-89f0-d44686805742_0(c16b06815e3b08f65dd9e7bea079da08a86b6801b4df5c196e85f0a68b20bffc): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268197 1598 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268262 1598 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: 
could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268285 1598 kuberuntime_manager.go:692] createPodSandbox for pod "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.268374 1598 pod_workers.go:190] Error syncing pod 145a2077-6b60-46d0-9d89-7151f4a78b4b ("dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard(145a2077-6b60-46d0-9d89-7151f4a78b4b)\" failed: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dashboard-metrics-scraper-c8b69c96c-ljbr4_kubernetes-dashboard_145a2077-6b60-46d0-9d89-7151f4a78b4b_0(b0a7787b999a8e94452730c7c3f574d37d249d15216835555313ddbf498ad6fb): failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:51:38 crio-20200724220901-14997 kubelet[1598]: E0724 22:51:38.744063 1598 pod_workers.go:190] Error syncing pod e76b38cb-d66f-4b4b-a962-cdd9c670e250 ("storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(e76b38cb-d66f-4b4b-a962-cdd9c670e250)" * * ==> storage-provisioner [c5fc8cb46441ceaef6bf0347ecae8bb423d4fc849fad64b2afb93b63052026f2] <== * F0724 22:48:28.232635 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p crio-20200724220901-14997 -n crio-20200724220901-14997 helpers_test.go:254: (dbg) Run: kubectl --context crio-20200724220901-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:262: ======> post-mortem[TestStartStop/group/crio/serial/Pause]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt helpers_test.go:265: (dbg) Non-zero exit: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 (98.729743ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: crio-20200724220901-14997/172.17.0.2 Start Time: Fri, 24 Jul 2020 22:17:30 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: 
busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-2jsfl (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-2jsfl: Type: Secret (a volume populated by a Secret) SecretName: default-token-2jsfl Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to crio-20200724220901-14997 Warning FailedCreatePodSandBox 34m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df0c3a384af70876e631b79afa238f8219e909abbaf7684ff30d1dce8a9a54fa): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 34m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7e1e51912dee7c82a399942b88c2992e156a60bb2bdcfc5a0340b0f5eb894fe0): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(eb6542b74bcd5168e50e03b41a224e2df5f3761d00e96252755d3d77cabf510e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(95381eb738e20acf009ca3de52d9dd44c0e10363127a19f55cc9d8b9b15d935d): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(5da5f64daf132db1d2f57fcbbd23261f5b055438f42fdc9e58f02b1dd57b9240): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d843353d3186fd450ada3d11767e5294d07ef90a741caba47e62a59eb82f9e97): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(cdb66590d250947869cc36af8c6d22ecc1fd99752b4ba0d09be444e2de9b06eb): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network 
sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(590dbdf2276a1c93cf59fe15b35411b3d19be44299a08851ea9957e8c6c03681): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(fe109ba9d55515001bcdb9c5ba7b51262263252184892313119b14411c1ba69a): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 27m (x17 over 32m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(4bee5e35c962aafeecadb6e69afb27ad039cbcf256940e8735ce4f079bf10862): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(e8fcb63d4fbd6ae3b748da2e936f37eeaa140b43d0f98056e85b082da82fbb01): netplugin failed with no error message Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(60cd38ffb8a1dd4fa390aba070fecc1acec7811fd5775ea0d2a431e9e2424e62): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(68df75d364336016be8023c132977fe34319aa4b9f92c7d70e457a57217a281e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(aaae971e15b6884b2bb1799dd4abb10b195368e13b5e4f290f43c5b4b11ec194): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(bd1d721cd146bbfe1fa05dc287b325ab526dc19d3404ff58cef06e259e2b5986): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(df52b05097dfc5b141cfc44d2e97b43954c3d247a95a09c7cfb30e7c69871395): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(d5729ae94c7a9707bb77f5dc43fb6a6d6d451a9f25758519bb72c37e64012cbe): failed to set bridge addr: could not add IP 
address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(1080ac6803a0e7d14628d92578ed58468a1fddfae32c4476765ee0ff2580e28e): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, crio-20200724220901-14997 Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(7b666099a72822b0a53ba3bb57632f31f4390032be187dca2fe8fb309f923937): failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m33s (x70 over 22m) kubelet, crio-20200724220901-14997 (combined from similar events): Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_busybox_default_b947226f-0b0e-4916-9b65-c7b70a6e137e_0(ddf5dab0e040cc42e18461726216644f257157271baffe0b4e6c06f634f4a48c): failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-5d4dd4b4db-9ssg6" not found Error from server (NotFound): pods "dashboard-metrics-scraper-c8b69c96c-ljbr4" not found Error from server (NotFound): pods "kubernetes-dashboard-6979c57f4c-wbxrt" not found ** /stderr ** helpers_test.go:267: kubectl --context crio-20200724220901-14997 describe pod busybox coredns-5d4dd4b4db-9ssg6 dashboard-metrics-scraper-c8b69c96c-ljbr4 kubernetes-dashboard-6979c57f4c-wbxrt: exit status 1 === CONT TestStartStop/group/crio/serial start_stop_delete_test.go:126: (dbg) Run: ./minikube-linux-amd64 delete -p crio-20200724220901-14997 start_stop_delete_test.go:126: (dbg) Non-zero exit: ./minikube-linux-amd64 delete -p crio-20200724220901-14997: context deadline exceeded (1.2µs) start_stop_delete_test.go:128: failed to clean up: args "./minikube-linux-amd64 delete -p crio-20200724220901-14997": context deadline exceeded start_stop_delete_test.go:131: (dbg) Run: kubectl config get-contexts crio-20200724220901-14997 start_stop_delete_test.go:131: (dbg) Non-zero exit: kubectl config get-contexts crio-20200724220901-14997: context deadline exceeded (200ns) start_stop_delete_test.go:133: config context error: context deadline exceeded (may be ok) start_stop_delete_test.go:136: expected exit code 1, got 0. output: === CONT TestStartStop/group/crio helpers_test.go:170: Cleaning up "crio-20200724220901-14997" profile ... 
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p crio-20200724220901-14997 helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p crio-20200724220901-14997: (6.720622746s) === CONT TestStartStop/group/containerd/serial/AddonExistsAfterStop start_stop_delete_test.go:219: ***** TestStartStop/group/containerd/serial/AddonExistsAfterStop: pod "k8s-app=kubernetes-dashboard" failed to start within 9m0s: timed out waiting for the condition **** start_stop_delete_test.go:219: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 start_stop_delete_test.go:219: TestStartStop/group/containerd/serial/AddonExistsAfterStop: showing logs for failed pods as of 2020-07-24 22:53:59.553188146 +0000 UTC m=+4669.677552631 start_stop_delete_test.go:219: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe po kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard start_stop_delete_test.go:219: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe po kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: context deadline exceeded (1.3µs) start_stop_delete_test.go:219: kubectl --context containerd-20200724221200-14997 describe po kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: context deadline exceeded start_stop_delete_test.go:219: (dbg) Run: kubectl --context containerd-20200724221200-14997 logs kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard start_stop_delete_test.go:219: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 logs kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: context deadline exceeded (300ns) start_stop_delete_test.go:219: kubectl --context containerd-20200724221200-14997 logs kubernetes-dashboard-6dbb54fd95-ms9wg -n kubernetes-dashboard: context deadline exceeded start_stop_delete_test.go:220: failed waiting for 'addon dashboard' pod post-stop-start: k8s-app=kubernetes-dashboard within 9m0s: timed out waiting for the condition helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/AddonExistsAfterStop]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": 
"/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, 
"Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/AddonExistsAfterStop FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/AddonExistsAfterStop]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/AddonExistsAfterStop logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 00a604004ac81 4689081edb103 28 seconds ago Exited storage-provisioner 9 0418a87b40342 * a09e6baaadfd2 2186a1a396deb 2 minutes ago Exited kindnet-cni 8 c2246648589e4 * 8662707b3b0f7 da26705ccb4b5 23 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 24 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 24 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 24 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 25 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 25 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:00 UTC. 
-- * Jul 24 22:53:31 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:31.429250714Z" level=info msg="shim containerd-shim started" address=/containerd-shim/f9f1891fdca8f0559e014d321d135906149a4e76eb3e27e458bb410dc3c40b1a.sock debug=false pid=17785 * Jul 24 22:53:31 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:31.513387859Z" level=info msg="StartContainer for \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\" returns successfully" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.338915929Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702740002Z" level=info msg="Finish piping stderr of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\"" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702824808Z" level=info msg="Finish piping stdout of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\"" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.704089695Z" level=info msg="TaskExit event &TaskExit{ContainerID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,ID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,Pid:17802,ExitStatus:1,ExitedAt:2020-07-24 22:53:34.703903283 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.740804546Z" level=info msg="shim reaped" id=00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.339152909Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.651833529Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\"" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.683705943Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\" returns successfully" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.354723545Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"991c799eb067a2479795a0d8680f9d5a0ff1d7d2123a8bf6bda88e3ee5a83b4a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.738603794Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"210eec6aba4991cf665f2d5410e52443de3952fd434a0daf4aa63714b939571b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:37.339027807Z" level=info 
msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:39 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:39.067863973Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c2bb74b49f5e994c0e371fd97ba9a2d6d244c89b1e5393c7de421babe20e8060\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:40 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:40.339118131Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:42 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:42.042455447Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:47.338855711Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:48.576257482Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:51 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:51.339129846Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.339151661Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.812196296Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.158472835Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox 
\"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.339193050Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:56 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:56.227009391Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:00 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:00.338868058Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:53:53 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.3 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 5ea7312d3bbd4189a79e31122cb237a6 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 
10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m (x5 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal Starting 41m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 40m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Normal Starting 40m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * Warning readOnlySysFS 40m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kubelet, containerd-20200724221200-14997 Starting kubelet. 
* Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeAllocatableEnforced 25m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeHasSufficientMemory 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 24m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 24m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <== * 2020-07-24 22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute * 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute * 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute * 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s * 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute * 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute * 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute * 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute * 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s * 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute * 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute * 2020-07-24 22:39:26.228142 I | mvcc: store.index: compact 1128 * 2020-07-24 22:39:26.229399 I | mvcc: finished scheduled compaction at 1128 (took 884.061µs) * 2020-07-24 22:44:26.238245 I | mvcc: store.index: compact 1236 * 2020-07-24 22:44:26.238942 I | mvcc: finished scheduled compaction at 1236 (took 365.326µs) * 2020-07-24 22:49:26.248026 I | mvcc: store.index: compact 1314 * 2020-07-24 22:49:26.248710 I | mvcc: finished scheduled compaction at 1314 (took 339.022µs) * 2020-07-24 22:51:52.521928 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (902.174992ms) to execute * 2020-07-24 22:51:52.522127 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (1.329719758s) to execute * 2020-07-24 22:51:52.522249 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (263.931372ms) to 
execute * 2020-07-24 22:51:52.522268 W | etcdserver: read-only range request "key:\"/registry/podtemplates\" range_end:\"/registry/podtemplatet\" count_only:true " with result "range_response_count:0 size:5" took too long (651.529003ms) to execute * 2020-07-24 22:51:52.522324 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (315.365732ms) to execute * 2020-07-24 22:51:52.522347 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (193.248178ms) to execute * 2020-07-24 22:51:53.489920 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (140.684539ms) to execute * 2020-07-24 22:51:53.489983 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (297.755813ms) to execute * * ==> kernel <== * 22:54:00 up 1:21, 0 users, load average: 0.16, 0.65, 2.37 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <== * I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist. * W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3] * I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints * I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io * I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io * I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts * I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io * I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io * I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s): * Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed * I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s): * Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database * I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) 
kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s): * Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database * I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s): * Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response * I0724 22:51:52.522679 1 trace.go:116] Trace[642232690]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:51:51.086288237 +0000 UTC m=+1345.382437437) (total time: 1.436354155s): * Trace[642232690]: [1.436324653s] [1.434510025s] Transaction committed * I0724 22:51:52.522911 1 trace.go:116] Trace[857693863]: "List etcd3" key:/pods/kubernetes-dashboard,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:51:51.191949067 +0000 UTC m=+1345.488098267) (total time: 1.330928342s): * Trace[857693863]: [1.330928342s] [1.330928342s] END * I0724 22:51:52.523386 1 trace.go:116] Trace[1580627152]: "List" url:/api/v1/namespaces/kubernetes-dashboard/pods,user-agent:e2e-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:172.17.0.1 (started: 2020-07-24 22:51:51.191908464 +0000 UTC m=+1345.488057664) (total time: 1.331449678s): * Trace[1580627152]: [1.331028249s] [1.330994846s] Listing from storage done * * ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <== * I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd * I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg * W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist * I0724 22:30:25.167526 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server. 
* I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL * I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node * I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator * I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator * I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator * I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC * I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint * I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513029 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * * ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <== * I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * * ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <== * I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3 * I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3 * I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:30:00.460979 1 config.go:315] Starting service config controller * I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller * I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config * I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <== * E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:55.798147 1 reflector.go:178] 
k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 
22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:00 UTC. -- * Jul 24 22:53:40 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:40.339278 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:53:42 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:42.042815 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:42 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:42.042880 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:42 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:42.042901 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:42 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:42.042962 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for 
"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576647 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576720 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576746 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576818 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:50.338414 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:50.338685 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812518 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812582 544 
kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812607 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812678 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158739 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158803 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158824 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158890 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox 
\"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:54.338557 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a09e6baaadfd28ee566e88056be5b5b6c39ad7b1953f9bc53a101febc380383d * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.338953 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227288 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227343 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227363 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227421 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f] <== * F0724 22:53:34.696730 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running 
pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/AddonExistsAfterStop]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (85.481123ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.3 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: Status: Pending IP: IPs: Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: Host Port: Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 28m (x17 over 32m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: 
code = Unknown desc = failed to setup network for sandbox "c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 3m57s (x75 over 21m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5ff19316dc062bffc4d2ed705ac7465ced0eb93bb31ece97a9a7976e0e544892": failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 === RUN TestStartStop/group/containerd/serial/VerifyKubernetesImages start_stop_delete_test.go:227: (dbg) Run: ./minikube-linux-amd64 ssh -p containerd-20200724221200-14997 "sudo crictl images -o json" start_stop_delete_test.go:227: (dbg) Non-zero exit: ./minikube-linux-amd64 ssh -p containerd-20200724221200-14997 "sudo crictl images -o json": context deadline exceeded (1.4µs) start_stop_delete_test.go:227: failed to get images inside minikube. args "./minikube-linux-amd64 ssh -p containerd-20200724221200-14997 \"sudo crictl images -o json\"": context deadline exceeded start_stop_delete_test.go:227: failed to decode images json: unexpected end of JSON input.
output: start_stop_delete_test.go:227: v1.18.3 images mismatch (-want +got): []string{ - "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "k8s.gcr.io/coredns:1.6.7", - "k8s.gcr.io/etcd:3.4.3-0", - "k8s.gcr.io/kube-apiserver:v1.18.3", - "k8s.gcr.io/kube-controller-manager:v1.18.3", - "k8s.gcr.io/kube-proxy:v1.18.3", - "k8s.gcr.io/kube-scheduler:v1.18.3", - "k8s.gcr.io/pause:3.2", - "kubernetesui/dashboard:v2.0.1", - "kubernetesui/metrics-scraper:v1.0.4", } helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/VerifyKubernetesImages]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", 
"CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, 
"5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/VerifyKubernetesImages FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/VerifyKubernetesImages]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/VerifyKubernetesImages logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 00a604004ac81 4689081edb103 31 seconds ago Exited storage-provisioner 9 0418a87b40342 * a09e6baaadfd2 2186a1a396deb 2 minutes ago Exited kindnet-cni 8 c2246648589e4 * 8662707b3b0f7 da26705ccb4b5 23 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 24 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 24 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 24 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 25 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 25 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:02 UTC. 
-- * Jul 24 22:53:31 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:31.513387859Z" level=info msg="StartContainer for \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\" returns successfully" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.338915929Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702740002Z" level=info msg="Finish piping stderr of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\"" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702824808Z" level=info msg="Finish piping stdout of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\"" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.704089695Z" level=info msg="TaskExit event &TaskExit{ContainerID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,ID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,Pid:17802,ExitStatus:1,ExitedAt:2020-07-24 22:53:34.703903283 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.740804546Z" level=info msg="shim reaped" id=00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.339152909Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.651833529Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\"" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.683705943Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\" returns successfully" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.354723545Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"991c799eb067a2479795a0d8680f9d5a0ff1d7d2123a8bf6bda88e3ee5a83b4a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.738603794Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"210eec6aba4991cf665f2d5410e52443de3952fd434a0daf4aa63714b939571b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:37.339027807Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:39 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:39.067863973Z" 
level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c2bb74b49f5e994c0e371fd97ba9a2d6d244c89b1e5393c7de421babe20e8060\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:40 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:40.339118131Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:42 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:42.042455447Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:47.338855711Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:48.576257482Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:51 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:51.339129846Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.339151661Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.812196296Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.158472835Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.339193050Z" level=info msg="RunPodsandbox 
for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:56 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:56.227009391Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:00 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:00.338868058Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:54:02 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:02.289758233Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: <none> * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: <unset> * RenewTime: Fri, 24 Jul 2020 22:53:53 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.3 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 5ea7312d3bbd4189a79e31122cb237a6 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: 
amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m (x5 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal Starting 41m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 41m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Normal Starting 40m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * Warning readOnlySysFS 40m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kubelet, containerd-20200724221200-14997 Starting kubelet. 
* Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeAllocatableEnforced 25m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeHasSufficientMemory 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 24m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 24m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns * * ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <== * 2020-07-24 22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute * 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute * 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute * 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s * 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute * 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute * 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute * 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute * 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s * 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute * 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute * 2020-07-24 22:39:26.228142 I | mvcc: store.index: compact 1128 * 2020-07-24 22:39:26.229399 I | mvcc: finished scheduled compaction at 1128 (took 884.061µs) * 2020-07-24 22:44:26.238245 I | mvcc: store.index: compact 1236 * 2020-07-24 22:44:26.238942 I | mvcc: finished scheduled compaction at 1236 (took 365.326µs) * 2020-07-24 22:49:26.248026 I | mvcc: store.index: compact 1314 * 2020-07-24 22:49:26.248710 I | mvcc: finished scheduled compaction at 1314 (took 339.022µs) * 2020-07-24 22:51:52.521928 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (902.174992ms) to execute * 2020-07-24 22:51:52.522127 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (1.329719758s) to execute * 2020-07-24 22:51:52.522249 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (263.931372ms) to 
execute * 2020-07-24 22:51:52.522268 W | etcdserver: read-only range request "key:\"/registry/podtemplates\" range_end:\"/registry/podtemplatet\" count_only:true " with result "range_response_count:0 size:5" took too long (651.529003ms) to execute * 2020-07-24 22:51:52.522324 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (315.365732ms) to execute * 2020-07-24 22:51:52.522347 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (193.248178ms) to execute * 2020-07-24 22:51:53.489920 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (140.684539ms) to execute * 2020-07-24 22:51:53.489983 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (297.755813ms) to execute * * ==> kernel <== * 22:54:03 up 1:21, 0 users, load average: 0.22, 0.66, 2.37 * Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux * PRETTY_NAME="Ubuntu 19.10" * * ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <== * I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist. * W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3] * I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints * I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io * I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io * I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps * I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts * I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps * I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io * I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io * I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps * I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s): * Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed * I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s): * Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database * I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) 
kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s): * Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database * I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s): * Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response * I0724 22:51:52.522679 1 trace.go:116] Trace[642232690]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:51:51.086288237 +0000 UTC m=+1345.382437437) (total time: 1.436354155s): * Trace[642232690]: [1.436324653s] [1.434510025s] Transaction committed * I0724 22:51:52.522911 1 trace.go:116] Trace[857693863]: "List etcd3" key:/pods/kubernetes-dashboard,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:51:51.191949067 +0000 UTC m=+1345.488098267) (total time: 1.330928342s): * Trace[857693863]: [1.330928342s] [1.330928342s] END * I0724 22:51:52.523386 1 trace.go:116] Trace[1580627152]: "List" url:/api/v1/namespaces/kubernetes-dashboard/pods,user-agent:e2e-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:172.17.0.1 (started: 2020-07-24 22:51:51.191908464 +0000 UTC m=+1345.488057664) (total time: 1.331449678s): * Trace[1580627152]: [1.331028249s] [1.330994846s] Listing from storage done * * ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <== * I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd * I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg * W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist * I0724 22:30:25.167526 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server. 
* I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL * I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node * I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator * I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator * I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator * I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC * I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint * I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513029 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * * ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <== * I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * * ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <== * I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3 * I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3 * I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:30:00.460979 1 config.go:315] Starting service config controller * I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller * I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config * I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <== * E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:55.798147 1 reflector.go:178] 
k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 
22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:03 UTC. -- * Jul 24 22:53:42 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:42.042962 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576647 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576720 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576746 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 
containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576818 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:50.338414 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:50.338685 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812518 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812582 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812607 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812678 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158739 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for 
sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158803 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158824 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158890 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:54.338557 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a09e6baaadfd28ee566e88056be5b5b6c39ad7b1953f9bc53a101febc380383d * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.338953 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227288 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227343 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227363 544 kuberuntime_manager.go:727] createPodSandbox for 
pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227421 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290048 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290106 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290121 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290172 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f] <== * F0724 22:53:34.696730 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po 
-o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/VerifyKubernetesImages]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (85.43512ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.3 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: <none> Status: Pending IP: IPs: <none> Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: <none> Host Port: <none> Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, 
containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 28m (x17 over 32m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": 
permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 3m58s (x75 over 21m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5ff19316dc062bffc4d2ed705ac7465ced0eb93bb31ece97a9a7976e0e544892": failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/VerifyKubernetesImages]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ 
"/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, 
"Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/VerifyKubernetesImages FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/VerifyKubernetesImages]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/VerifyKubernetesImages logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 00a604004ac81 4689081edb103 33 seconds ago Exited storage-provisioner 9 0418a87b40342 * a09e6baaadfd2 2186a1a396deb 2 minutes ago Exited kindnet-cni 8 c2246648589e4 * 8662707b3b0f7 da26705ccb4b5 23 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 24 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 24 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 24 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 25 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 25 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:04 UTC. 
-- * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.338915929Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702740002Z" level=info msg="Finish piping stderr of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\"" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702824808Z" level=info msg="Finish piping stdout of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\"" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.704089695Z" level=info msg="TaskExit event &TaskExit{ContainerID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,ID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,Pid:17802,ExitStatus:1,ExitedAt:2020-07-24 22:53:34.703903283 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.740804546Z" level=info msg="shim reaped" id=00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.339152909Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.651833529Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\"" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.683705943Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\" returns successfully" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.354723545Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"991c799eb067a2479795a0d8680f9d5a0ff1d7d2123a8bf6bda88e3ee5a83b4a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.738603794Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"210eec6aba4991cf665f2d5410e52443de3952fd434a0daf4aa63714b939571b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:37.339027807Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:39 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:39.067863973Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network 
for sandbox \"c2bb74b49f5e994c0e371fd97ba9a2d6d244c89b1e5393c7de421babe20e8060\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:40 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:40.339118131Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:42 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:42.042455447Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:47.338855711Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:48.576257482Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:51 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:51.339129846Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.339151661Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.812196296Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.158472835Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.339193050Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:56 containerd-20200724221200-14997 containerd[458]: 
time="2020-07-24T22:53:56.227009391Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:00 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:00.338868058Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:54:02 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:02.289758233Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:04 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:04.338704902Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: * RenewTime: Fri, 24 Jul 2020 22:54:03 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.3 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 5ea7312d3bbd4189a79e31122cb237a6 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 
19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) * Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m (x5 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal Starting 41m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 41m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Normal Starting 40m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
* Warning readOnlySysFS 40m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeAllocatableEnforced 25m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeHasSufficientMemory 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 24m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 24m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * * ==> dmesg <== * [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65 * [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000' * [ +0.005631] FS-Cache: Duplicate cookie detected * [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7 * [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000' * [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca * [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000' * [ +1.890005] FS-Cache: Duplicate cookie detected * [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0 * [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000' * [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2 * [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000' * [ +2.781512] FS-Cache: Duplicate cookie detected * [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1] * [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d * [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000' * [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1] * [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e * [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000' * [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead. 
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <==
* 2020-07-24 22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute
* 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute
* 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute
* 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s
* 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute
* 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute
* 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute
* 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute
* 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s
* 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute
* 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute
* 2020-07-24 22:39:26.228142 I | mvcc: store.index: compact 1128
* 2020-07-24 22:39:26.229399 I | mvcc: finished scheduled compaction at 1128 (took 884.061µs)
* 2020-07-24 22:44:26.238245 I | mvcc: store.index: compact 1236
* 2020-07-24 22:44:26.238942 I | mvcc: finished scheduled compaction at 1236 (took 365.326µs)
* 2020-07-24 22:49:26.248026 I | mvcc: store.index: compact 1314
* 2020-07-24 22:49:26.248710 I | mvcc: finished scheduled compaction at 1314 (took 339.022µs)
* 2020-07-24 22:51:52.521928 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (902.174992ms) to execute
* 2020-07-24 22:51:52.522127 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (1.329719758s) to execute
* 2020-07-24 22:51:52.522249 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (263.931372ms) to execute
* 2020-07-24 22:51:52.522268 W | etcdserver: read-only range request "key:\"/registry/podtemplates\" range_end:\"/registry/podtemplatet\" count_only:true " with result "range_response_count:0 size:5" took too long (651.529003ms) to execute
* 2020-07-24 22:51:52.522324 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (315.365732ms) to execute
* 2020-07-24 22:51:52.522347 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (193.248178ms) to execute
* 2020-07-24 22:51:53.489920 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (140.684539ms) to execute
* 2020-07-24 22:51:53.489983 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (297.755813ms) to execute
*
* ==> kernel <==
* 22:54:04 up 1:21, 0 users, load average: 0.22, 0.66, 2.37
* Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <==
* I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist.
* W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3]
* I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints
* I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io
* I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io
* I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps
* I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts
* I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps
* I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io
* I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
* I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps
* I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s):
* Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed
* I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s):
* Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database
* I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64)
kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s): * Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database * I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s): * Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response * I0724 22:51:52.522679 1 trace.go:116] Trace[642232690]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:51:51.086288237 +0000 UTC m=+1345.382437437) (total time: 1.436354155s): * Trace[642232690]: [1.436324653s] [1.434510025s] Transaction committed * I0724 22:51:52.522911 1 trace.go:116] Trace[857693863]: "List etcd3" key:/pods/kubernetes-dashboard,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:51:51.191949067 +0000 UTC m=+1345.488098267) (total time: 1.330928342s): * Trace[857693863]: [1.330928342s] [1.330928342s] END * I0724 22:51:52.523386 1 trace.go:116] Trace[1580627152]: "List" url:/api/v1/namespaces/kubernetes-dashboard/pods,user-agent:e2e-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:172.17.0.1 (started: 2020-07-24 22:51:51.191908464 +0000 UTC m=+1345.488057664) (total time: 1.331449678s): * Trace[1580627152]: [1.331028249s] [1.330994846s] Listing from storage done * * ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <== * I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd * I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg * W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist * I0724 22:30:25.167526 1 shared_informer.go:230] Caches are synced for disruption * I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server. 
* I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume * I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL * I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node * I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator * I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator * I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator * I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC * I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach * I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint * I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone: * I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager * I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller * W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp. * I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal. * I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota * I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets * I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector * I0724 22:30:25.513029 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. 
Proceeding to collect garbage * * ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <== * I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * * ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <== * I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3 * I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3 * I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:30:00.460979 1 config.go:315] Starting service config controller * I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller * I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config * I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <== * E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:55.798147 1 reflector.go:178] 
k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 
22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:05 UTC. -- * Jul 24 22:53:42 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:42.042962 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576647 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576720 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576746 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:48 
* Jul 24 22:53:48 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:48.576818 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:50.338414 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f
* Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:50.338685 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"
* Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812518 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812582 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812607 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812678 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158739 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158803 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158824 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158890 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:54.338557 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a09e6baaadfd28ee566e88056be5b5b6c39ad7b1953f9bc53a101febc380383d
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.338953 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227288 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227343 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227363 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227421 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290048 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290106 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290121 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290172 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
*
* ==> storage-provisioner [00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f] <==
* F0724 22:53:34.696730 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997
helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg
helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/VerifyKubernetesImages]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (87.831286ms)
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Node: containerd-20200724221200-14997/172.17.0.3
Start Time: Fri, 24 Jul 2020 22:19:40 +0000
Labels: integration-test=busybox
Annotations:
Status: Pending
IP:
IPs:
Containers:
busybox:
Container ID:
Image: busybox:1.28.4-glibc
Image ID:
Port:
Host Port:
Command: sleep 3600
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment:
Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
default-token-xmm9f:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-xmm9f
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997
Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 28m (x17 over 32m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied
Warning FailedCreatePodSandBox 4m (x75 over 21m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5ff19316dc062bffc4d2ed705ac7465ced0eb93bb31ece97a9a7976e0e544892": failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found
Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found
** /stderr **
helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1
=== RUN TestStartStop/group/containerd/serial/Pause
start_stop_delete_test.go:233: (dbg) Run: ./minikube-linux-amd64 pause -p containerd-20200724221200-14997 --alsologtostderr -v=1
start_stop_delete_test.go:233: (dbg) Non-zero exit: ./minikube-linux-amd64 pause -p containerd-20200724221200-14997 --alsologtostderr -v=1: context deadline exceeded (1.4µs)
start_stop_delete_test.go:233: ./minikube-linux-amd64 pause -p containerd-20200724221200-14997 --alsologtostderr -v=1 failed: context deadline exceeded
helpers_test.go:215: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/Pause]: docker inspect <======
helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997
helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997:
-- stdout --
[
    {
        "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318",
        "Created": "2020-07-24T22:12:08.823590057Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [ "/sbin/init" ],
        "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" },
        "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578",
        "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname",
"/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": 
"/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded801ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, 
"Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/Pause FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/Pause]: minikube logs <====== helpers_test.go:240: (dbg) Run: ./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/Pause logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 00a604004ac81 4689081edb103 35 seconds ago Exited storage-provisioner 9 0418a87b40342 * a09e6baaadfd2 2186a1a396deb 2 minutes ago Exited kindnet-cni 8 c2246648589e4 * 8662707b3b0f7 da26705ccb4b5 23 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 24 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 24 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 24 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 25 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 25 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:06 UTC. 
* Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.702824808Z" level=info msg="Finish piping stdout of container \"00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f\""
* Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.704089695Z" level=info msg="TaskExit event &TaskExit{ContainerID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,ID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,Pid:17802,ExitStatus:1,ExitedAt:2020-07-24 22:53:34.703903283 +0000 UTC,XXX_unrecognized:[],}"
* Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.740804546Z" level=info msg="shim reaped" id=00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f
* Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.339152909Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.651833529Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\""
* Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.683705943Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\" returns successfully"
* Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.354723545Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"991c799eb067a2479795a0d8680f9d5a0ff1d7d2123a8bf6bda88e3ee5a83b4a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.738603794Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"210eec6aba4991cf665f2d5410e52443de3952fd434a0daf4aa63714b939571b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:37.339027807Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}"
* Jul 24 22:53:39 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:39.067863973Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c2bb74b49f5e994c0e371fd97ba9a2d6d244c89b1e5393c7de421babe20e8060\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:40 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:40.339118131Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}"
* Jul 24 22:53:42 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:42.042455447Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:47.338855711Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}"
* Jul 24 22:53:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:48.576257482Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:51 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:51.339129846Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.339151661Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}"
* Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.812196296Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.158472835Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.339193050Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}"
* Jul 24 22:53:56 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:56.227009391Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:00 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:00.338868058Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}"
* Jul 24 22:54:02 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:02.289758233Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:04 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:04.338704902Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}"
* Jul 24 22:54:05 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:05.339243128Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}"
* Jul 24 22:54:05 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:05.795456739Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
*
* ==> describe nodes <==
* Name: containerd-20200724221200-14997
* Roles: master
* Labels: beta.kubernetes.io/arch=amd64
* beta.kubernetes.io/os=linux
* kubernetes.io/arch=amd64
* kubernetes.io/hostname=containerd-20200724221200-14997
* kubernetes.io/os=linux
* minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf
* minikube.k8s.io/name=containerd-20200724221200-14997
* minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700
* minikube.k8s.io/version=v1.12.1
* node-role.kubernetes.io/master=
* Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock
* node.alpha.kubernetes.io/ttl: 0
* volumes.kubernetes.io/controller-managed-attach-detach: true
* CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000
* Taints:
* Unschedulable: false
* Lease:
* HolderIdentity: containerd-20200724221200-14997
* AcquireTime:
* RenewTime: Fri, 24 Jul 2020 22:54:03 +0000
* Conditions:
* Type Status LastHeartbeatTime LastTransitionTime Reason Message
* ---- ------ ----------------- ------------------ ------ -------
* MemoryPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
* DiskPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
* PIDPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
* Ready True Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status
* Addresses:
* InternalIP: 172.17.0.3
* Hostname: containerd-20200724221200-14997
* Capacity:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* Allocatable:
* cpu: 16
* ephemeral-storage: 128884272Ki
* hugepages-1Gi: 0
* hugepages-2Mi: 0
* memory: 65817044Ki
* pods: 110
* System Info:
* Machine ID: 5ea7312d3bbd4189a79e31122cb237a6
* System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce
* Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529
* Kernel Version: 5.4.0-1022-azure
* OS Image: Ubuntu 19.10
* Operating System: linux
* Architecture: amd64
* Container Runtime Version: containerd://1.3.3-14-g449e9269
* Kubelet Version: v1.18.3
* Kube-Proxy Version: v1.18.3
* PodCIDR: 10.244.0.0/24
* PodCIDRs: 10.244.0.0/24
* Non-terminated Pods: (11 in total)
* Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
* --------- ---- ------------ ---------- --------------- ------------- ---
* default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m
* kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m
* kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m
* kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m
* kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 22m
* kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 41m
* kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 41m
* kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m
* kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m
* kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m
* Allocated resources:
* (Total limits may be over 100 percent, i.e., overcommitted.)
* Resource Requests Limits
* -------- -------- ------
* cpu 750m (4%) 100m (0%)
* memory 120Mi (0%) 220Mi (0%)
* ephemeral-storage 0 (0%) 0 (0%)
* hugepages-1Gi 0 (0%) 0 (0%)
* hugepages-2Mi 0 (0%) 0 (0%)
* Events:
* Type Reason Age From Message
* ---- ------ ---- ---- -------
* Normal NodeHasNoDiskPressure 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 41m (x5 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID
* Normal NodeHasSufficientMemory 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory
* Normal Starting 41m kubelet, containerd-20200724221200-14997 Starting kubelet.
* Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041
* Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567
* Normal NodeHasSufficientMemory 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID
* Normal NodeAllocatableEnforced 41m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods
* Normal NodeReady 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady
* Normal Starting 40m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy.
* Warning readOnlySysFS 40m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 25m kubelet, containerd-20200724221200-14997 Starting kubelet.
* Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041
* Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567
* Normal NodeAllocatableEnforced 25m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods
* Normal NodeHasSufficientMemory 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory
* Normal NodeHasNoDiskPressure 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure
* Normal NodeHasSufficientPID 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID
* Warning readOnlySysFS 24m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)
* Normal Starting 24m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy.
*
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
*
* ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <==
* 2020-07-24 22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute
* 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute
* 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute
* 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s
* 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute
* 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute
* 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute
* 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute
* 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s
* 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute
* 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute
* 2020-07-24 22:39:26.228142 I | mvcc: store.index: compact 1128
* 2020-07-24 22:39:26.229399 I | mvcc: finished scheduled compaction at 1128 (took 884.061µs)
* 2020-07-24 22:44:26.238245 I | mvcc: store.index: compact 1236
* 2020-07-24 22:44:26.238942 I | mvcc: finished scheduled compaction at 1236 (took 365.326µs)
* 2020-07-24 22:49:26.248026 I | mvcc: store.index: compact 1314
* 2020-07-24 22:49:26.248710 I | mvcc: finished scheduled compaction at 1314 (took 339.022µs)
* 2020-07-24 22:51:52.521928 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (902.174992ms) to execute
* 2020-07-24 22:51:52.522127 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (1.329719758s) to execute
* 2020-07-24 22:51:52.522249 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (263.931372ms) to execute
* 2020-07-24 22:51:52.522268 W | etcdserver: read-only range request "key:\"/registry/podtemplates\" range_end:\"/registry/podtemplatet\" count_only:true " with result "range_response_count:0 size:5" took too long (651.529003ms) to execute
* 2020-07-24 22:51:52.522324 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (315.365732ms) to execute
* 2020-07-24 22:51:52.522347 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (193.248178ms) to execute
* 2020-07-24 22:51:53.489920 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (140.684539ms) to execute
* 2020-07-24 22:51:53.489983 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (297.755813ms) to execute
*
* ==> kernel <==
* 22:54:06 up 1:21, 0 users, load average: 0.29, 0.66, 2.36
* Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
*
* ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <==
* I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist.
* W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3]
* I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints
* I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io
* I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io
* I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps
* I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts
* I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps
* I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io
* I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
* I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps
* I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s):
* Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed
* I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s):
* Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database
* I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s):
* Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database
* I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s):
* Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response
* I0724 22:51:52.522679 1 trace.go:116] Trace[642232690]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:51:51.086288237 +0000 UTC m=+1345.382437437) (total time: 1.436354155s):
* Trace[642232690]: [1.436324653s] [1.434510025s] Transaction committed
* I0724 22:51:52.522911 1 trace.go:116] Trace[857693863]: "List etcd3" key:/pods/kubernetes-dashboard,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:51:51.191949067 +0000 UTC m=+1345.488098267) (total time: 1.330928342s):
* Trace[857693863]: [1.330928342s] [1.330928342s] END
* I0724 22:51:52.523386 1 trace.go:116] Trace[1580627152]: "List" url:/api/v1/namespaces/kubernetes-dashboard/pods,user-agent:e2e-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:172.17.0.1 (started: 2020-07-24 22:51:51.191908464 +0000 UTC m=+1345.488057664) (total time: 1.331449678s):
* Trace[1580627152]: [1.331028249s] [1.330994846s] Listing from storage done
*
* ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <==
* I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd
* I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg
* W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist
* I0724 22:30:25.167526 1 shared_informer.go:230] Caches are synced for disruption
* I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server.
* I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume
* I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL
* I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node
* I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator
* I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator
* I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator
* I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC
* I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach
* I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint
* I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone:
* I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager
* I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller
* W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp.
* I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal.
* I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets
* I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector
* I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector
Proceeding to collect garbage * * ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <== * I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory * I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3 * I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt * I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt * I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257 * I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController * I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252 * W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused * * ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <== * I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3 * I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller * I0724 22:13:19.706338 1 config.go:315] Starting service config controller * I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config * I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config * W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy * I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3 * I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier. 
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3 * I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288 * E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime]) * I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400 * I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600 * I0724 22:30:00.460979 1 config.go:315] Starting service config controller * I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config * I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller * I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config * I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config * I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config * * ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <== * E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:55.798147 1 reflector.go:178] 
k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 
22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused * I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:06 UTC. -- * Jul 24 22:53:50 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:50.338685 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812518 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812582 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812607 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:52 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:52.812678 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for 
"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158739 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158803 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158824 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158890 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:54.338557 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a09e6baaadfd28ee566e88056be5b5b6c39ad7b1953f9bc53a101febc380383d * Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.338953 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)" * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227288 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: 
could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227343 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227363 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227421 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290048 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290106 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290121 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290172 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox 
\"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: I0724 22:54:05.338585 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.338926 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)" * Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795728 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795793 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795810 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898": failed to set bridge addr: could not add IP address to "cni0": permission denied * Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795861 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> storage-provisioner [00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f] <== * F0724 22:53:34.696730 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host -- /stdout -- helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd 
kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/Pause]: describe non-running pods <====== helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (81.341436ms) -- stdout -- Name: busybox Namespace: default Priority: 0 Node: containerd-20200724221200-14997/172.17.0.3 Start Time: Fri, 24 Jul 2020 22:19:40 +0000 Labels: integration-test=busybox Annotations: <none> Status: Pending IP: IPs: <none> Containers: busybox: Container ID: Image: busybox:1.28.4-glibc Image ID: Port: <none> Host Port: <none> Command: sleep 3600 State: Waiting Reason: ContainerCreating Ready: False Restart Count: 0 Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro) Conditions: Type Status Initialized True Ready False ContainersReady False PodScheduled True Volumes: default-token-xmm9f: Type: Secret (a volume populated by a Secret) SecretName: default-token-xmm9f Optional: false QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s node.kubernetes.io/unreachable:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 34m default-scheduler Successfully assigned default/busybox to containerd-20200724221200-14997 Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 34m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 33m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could 
not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 32m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 28m (x17 over 32m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 24m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 23m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox 
"c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 22m kubelet, containerd-20200724221200-14997 Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied Warning FailedCreatePodSandBox 4m2s (x75 over 22m) kubelet, containerd-20200724221200-14997 (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5ff19316dc062bffc4d2ed705ac7465ced0eb93bb31ece97a9a7976e0e544892": failed to set bridge addr: could not add IP address to "cni0": permission denied -- /stdout -- ** stderr ** Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found ** /stderr ** helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 helpers_test.go:215: -----------------------post-mortem-------------------------------- helpers_test.go:223: ======> post-mortem[TestStartStop/group/containerd/serial/Pause]: docker inspect <====== helpers_test.go:224: (dbg) Run: docker inspect containerd-20200724221200-14997 helpers_test.go:228: (dbg) docker inspect containerd-20200724221200-14997: -- stdout -- [ { "Id": "0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318", "Created": "2020-07-24T22:12:08.823590057Z", "Path": "/usr/local/bin/entrypoint", "Args": [ "/sbin/init" ], "State": { "Status": "running", "Running": true, "Paused": false, "Restarting": false, "OOMKilled": false, "Dead": false, "Pid": 707805, "ExitCode": 0, "Error": "", "StartedAt": "2020-07-24T22:27:53.861621416Z", "FinishedAt": "2020-07-24T22:27:47.093520839Z" }, "Image": "sha256:e6bc41c39dc48b2b472936db36aedb28527ce0f675ed1bc20d029125c9ccf578", "ResolvConfPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/resolv.conf", "HostnamePath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hostname", "HostsPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/hosts", "LogPath": "/var/lib/docker/containers/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318/0e53ceb55426574d2050c441dda4805c64cf7b1ac0ebc2ed9748e7d0d843a318-json.log", "Name": "/containerd-20200724221200-14997", "RestartCount": 0, "Driver": "overlay2", "Platform": "linux", "MountLabel": "", "ProcessLabel": "", "AppArmorProfile": "unconfined", "ExecIDs": null, "HostConfig": { "Binds": [ "/lib/modules:/lib/modules:ro", "containerd-20200724221200-14997:/var" ], "ContainerIDFile": "", "LogConfig": { "Type": "json-file", "Config": {} }, "NetworkMode": "default", "PortBindings": { "22/tcp": [ { "HostIp": "127.0.0.1", 
"HostPort": "" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "" } ] }, "RestartPolicy": { "Name": "no", "MaximumRetryCount": 0 }, "AutoRemove": false, "VolumeDriver": "", "VolumesFrom": null, "CapAdd": null, "CapDrop": null, "Capabilities": null, "Dns": [], "DnsOptions": [], "DnsSearch": [], "ExtraHosts": null, "GroupAdd": null, "IpcMode": "private", "Cgroup": "", "Links": null, "OomScoreAdj": 0, "PidMode": "", "Privileged": true, "PublishAllPorts": false, "ReadonlyRootfs": false, "SecurityOpt": [ "seccomp=unconfined", "apparmor=unconfined", "label=disable" ], "Tmpfs": { "/run": "", "/tmp": "" }, "UTSMode": "", "UsernsMode": "", "ShmSize": 67108864, "Runtime": "runc", "ConsoleSize": [ 0, 0 ], "Isolation": "", "CpuShares": 0, "Memory": 2306867200, "NanoCpus": 2000000000, "CgroupParent": "", "BlkioWeight": 0, "BlkioWeightDevice": [], "BlkioDeviceReadBps": null, "BlkioDeviceWriteBps": null, "BlkioDeviceReadIOps": null, "BlkioDeviceWriteIOps": null, "CpuPeriod": 0, "CpuQuota": 0, "CpuRealtimePeriod": 0, "CpuRealtimeRuntime": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": [], "DeviceCgroupRules": null, "DeviceRequests": null, "KernelMemory": 0, "KernelMemoryTCP": 0, "MemoryReservation": 0, "MemorySwap": -1, "MemorySwappiness": null, "OomKillDisable": false, "PidsLimit": null, "Ulimits": null, "CpuCount": 0, "CpuPercent": 0, "IOMaximumIOps": 0, "IOMaximumBandwidth": 0, "MaskedPaths": null, "ReadonlyPaths": null }, "GraphDriver": { "Data": { "LowerDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8-init/diff:/var/lib/docker/overlay2/33ef5d7ed4fbb30d55d2e5925de9b5c359ee2f6bcaf5a31308f0c4c11e358759/diff:/var/lib/docker/overlay2/3cf31b1deb19cf9de586952311d9f94baab00dac6393d96e1a80da943c3823e6/diff:/var/lib/docker/overlay2/5e25be0edbb143d613aab16e23056ece18370c2427214464ccc0b8f033fc76c5/diff:/var/lib/docker/overlay2/933d7b760faf1726c0231eb48cbf3e7af8aeea7d1f449959b7bf07bbe9a84376/diff:/var/lib/docker/overlay2/69ec5555996fe29cf36b7e927034690ecb66f32d4c72396f3c2f3d73303ab10c/diff:/var/lib/docker/overlay2/891f9311e0e93aca44c576d873c0860bf67e972178833fc3ac0569308996933c/diff:/var/lib/docker/overlay2/431ddf5eea9395c020c3a27539c6984764cb7a0dd8f3eeadfbb24f37a897caf7/diff:/var/lib/docker/overlay2/ef7d3d9d53c985fdb46a41c8bcbc04c7d98baa3d8030b1432d115657269a4611/diff:/var/lib/docker/overlay2/d976f7471257da8ef85e0de564cae8e6b16eeb8f83fff6750b3985387f87052c/diff:/var/lib/docker/overlay2/16ed6f6abe25e98b50b10529e70d835da8b64ca99934243f70e5cc63cca44ba1/diff:/var/lib/docker/overlay2/a6495d6f579c32bd0faf387dc3b48c733dae0219e3cfc5d94f9f6ba027261668/diff:/var/lib/docker/overlay2/a34db548da9e24b0323e43e53767c49484fa19509ba9c1e738fb42330788ec52/diff:/var/lib/docker/overlay2/03f93f74dafc48e2bb0234297546abf70dc9918c047a7e3b500ed2242aec09ff/diff:/var/lib/docker/overlay2/b0d08d0be034080c25561d9451d8081050a591c2991a7f3d242234b9bf7c046d/diff:/var/lib/docker/overlay2/d7be6ed61eedc76988e74fe1d87a6a1ef287912654acea5566e7f9cb797b3e3c/diff:/var/lib/docker/overlay2/bde6a4a42d5652b3ef22e3a1dddd0f6fb765d2cd9f5b1a38d4ecb9b44c45b998/diff:/var/lib/docker/overlay2/0f384da0b034b730d4bdd22f270821719c0ac0b7b7d608c3a52fa89b9a386b90/diff:/var/lib/docker/overlay2/46e9d50bc162d2d523c12da3e378e5b859515056dc7d7f11344c1c42c64718d4/diff:/var/lib/docker/overlay2/6e7552cdb94dfc1c9a0a8efae25c758d93fff3d2eec58a3e2b710496d0c94653/diff:/var/lib/docker/overlay2/b9217960caef08b70ded80
1ca89f5a0808227383f2ab1de5ae7313c200a8090b/diff:/var/lib/docker/overlay2/d11fc39cafccd3e3cb8fba6cfdda2bbfdb99c2e41619ce169b0d6ede0b3f6a64/diff:/var/lib/docker/overlay2/0f89620291cba4bc3afac7c9fa1faf205450d5dcae032c3a9882009633b72b4a/diff", "MergedDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/merged", "UpperDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/diff", "WorkDir": "/var/lib/docker/overlay2/302021dc5d13c095c42529cd14fcb13b8e0e062683e3b8dd811d0a384f95b4d8/work" }, "Name": "overlay2" }, "Mounts": [ { "Type": "bind", "Source": "/lib/modules", "Destination": "/lib/modules", "Mode": "ro", "RW": false, "Propagation": "rprivate" }, { "Type": "volume", "Name": "containerd-20200724221200-14997", "Source": "/var/lib/docker/volumes/containerd-20200724221200-14997/_data", "Destination": "/var", "Driver": "local", "Mode": "z", "RW": true, "Propagation": "" } ], "Config": { "Hostname": "containerd-20200724221200-14997", "Domainname": "", "User": "root", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "ExposedPorts": { "22/tcp": {}, "2376/tcp": {}, "5000/tcp": {}, "8444/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "container=docker", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Image": "kicbase/stable:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438", "Volumes": null, "WorkingDir": "", "Entrypoint": [ "/usr/local/bin/entrypoint", "/sbin/init" ], "OnBuild": null, "Labels": { "created_by.minikube.sigs.k8s.io": "true", "mode.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "name.minikube.sigs.k8s.io": "containerd-20200724221200-14997", "role.minikube.sigs.k8s.io": "" }, "StopSignal": "SIGRTMIN+3" }, "NetworkSettings": { "Bridge": "", "SandboxID": "14a5046cd9d512b8bd2af14bc8fd545f797506a7c8ca9b3e98d538950faaa7ca", "HairpinMode": false, "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "Ports": { "22/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32920" } ], "2376/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32919" } ], "5000/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32918" } ], "8444/tcp": [ { "HostIp": "127.0.0.1", "HostPort": "32917" } ] }, "SandboxKey": "/var/run/docker/netns/14a5046cd9d5", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null, "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "MacAddress": "02:42:ac:11:00:03", "Networks": { "bridge": { "IPAMConfig": null, "Links": null, "Aliases": null, "NetworkID": "d4a420189740571bc15c14b2ebac1ab66e95c9c6796461aa96bb3486a5f7e03d", "EndpointID": "8ef326025492f4d9f256559b312826a73e65239c14de22e27539be8d3e9580f4", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.3", "IPPrefixLen": 16, "IPv6Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "MacAddress": "02:42:ac:11:00:03", "DriverOpts": null } } } } ] -- /stdout -- helpers_test.go:232: (dbg) Run: ./minikube-linux-amd64 status --format={{.Host}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997 helpers_test.go:237: <<< TestStartStop/group/containerd/serial/Pause FAILED: start of post-mortem logs <<< helpers_test.go:238: ======> post-mortem[TestStartStop/group/containerd/serial/Pause]: minikube logs <====== helpers_test.go:240: (dbg) Run: 
./minikube-linux-amd64 -p containerd-20200724221200-14997 logs -n 25 helpers_test.go:245: TestStartStop/group/containerd/serial/Pause logs: -- stdout -- * ==> container status <== * CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID * 00a604004ac81 4689081edb103 36 seconds ago Exited storage-provisioner 9 0418a87b40342 * a09e6baaadfd2 2186a1a396deb 2 minutes ago Exited kindnet-cni 8 c2246648589e4 * 8662707b3b0f7 da26705ccb4b5 23 minutes ago Running kube-controller-manager 3 7b50a18d935ac * cdb7f0919992f 3439b7546f29b 24 minutes ago Running kube-proxy 0 2fca05c662478 * 7dc9f83693a99 7e28efa976bd1 24 minutes ago Running kube-apiserver 0 236935089e2fb * 90f7be9dd5648 da26705ccb4b5 24 minutes ago Exited kube-controller-manager 2 7b50a18d935ac * 574a9379a97ce 76216c34ed0c7 25 minutes ago Running kube-scheduler 0 9b3229d6d9980 * e1da8367b2af7 303ce5db0e90d 25 minutes ago Running etcd 0 0941e8d40a5bd * * ==> containerd <== * -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:08 UTC. -- * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.704089695Z" level=info msg="TaskExit event &TaskExit{ContainerID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,ID:00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f,Pid:17802,ExitStatus:1,ExitedAt:2020-07-24 22:53:34.703903283 +0000 UTC,XXX_unrecognized:[],}" * Jul 24 22:53:34 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:34.740804546Z" level=info msg="shim reaped" id=00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.339152909Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.651833529Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\"" * Jul 24 22:53:35 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:35.683705943Z" level=info msg="RemoveContainer for \"228d0592b230e8e10ae8f53c83bc4cb3c2ca8cc714c0a428cb04f2a8a22c709f\" returns successfully" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.354723545Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"991c799eb067a2479795a0d8680f9d5a0ff1d7d2123a8bf6bda88e3ee5a83b4a\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:36 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:36.738603794Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"210eec6aba4991cf665f2d5410e52443de3952fd434a0daf4aa63714b939571b\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:37 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:37.339027807Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:39 
containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:39.067863973Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c2bb74b49f5e994c0e371fd97ba9a2d6d244c89b1e5393c7de421babe20e8060\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:40 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:40.339118131Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:42 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:42.042455447Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"ca19fc91f1eae4b80b05c4f3de8023e68e4bdb40295bd935d96ceccdd34a62bc\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:47 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:47.338855711Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:53:48 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:48.576257482Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"54a499035f788abb0121b0ac23492595ca813d209dfd1491be7b9f188bde9f6f\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:51 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:51.339129846Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.339151661Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:52 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:52.812196296Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"a24be12ec90ceddc1aa383fee9cea18174d23821e586a00253d5836a2d9ee0e8\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.158472835Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:53:54 
containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:54.339193050Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:53:56 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:53:56.227009391Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:kubernetes-dashboard-6dbb54fd95-ms9wg,Uid:267f98a1-434e-45a2-abda-53ad3bb7bea1,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:00 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:00.338868058Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,}" * Jul 24 22:54:02 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:02.289758233Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4b662b5f-6e78-48de-818c-81989d7f4ea9,Namespace:default,Attempt:0,} failed, error" error="failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:04 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:04.338704902Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,}" * Jul 24 22:54:05 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:05.339243128Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,}" * Jul 24 22:54:05 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:05.795456739Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bff467f8-hlk9j,Uid:584286ea-2ddf-4194-8a3c-48d505f7f607,Namespace:kube-system,Attempt:0,} failed, error" error="failed to setup network for sandbox \"c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * Jul 24 22:54:07 containerd-20200724221200-14997 containerd[458]: time="2020-07-24T22:54:07.189718545Z" level=error msg="RunPodSandbox for &PodSandboxMetadata{Name:dashboard-metrics-scraper-dc6947fbf-xphhd,Uid:02a59124-2c99-4a38-abea-eedfd3e1ba46,Namespace:kubernetes-dashboard,Attempt:0,} failed, error" error="failed to setup network for sandbox \"7c591b75a4f90cc50fb8337ded7e992979267274c708adc157d4d824b8f08ab6\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied" * * ==> describe nodes <== * Name: containerd-20200724221200-14997 * Roles: master * Labels: beta.kubernetes.io/arch=amd64 * beta.kubernetes.io/os=linux * kubernetes.io/arch=amd64 * kubernetes.io/hostname=containerd-20200724221200-14997 * kubernetes.io/os=linux * minikube.k8s.io/commit=40eac8ce825d2bb784efa63b900c8d788ea49faf * minikube.k8s.io/name=containerd-20200724221200-14997 * minikube.k8s.io/updated_at=2020_07_24T22_13_02_0700 * minikube.k8s.io/version=v1.12.1 * node-role.kubernetes.io/master= * Annotations: 
kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock * node.alpha.kubernetes.io/ttl: 0 * volumes.kubernetes.io/controller-managed-attach-detach: true * CreationTimestamp: Fri, 24 Jul 2020 22:12:47 +0000 * Taints: <none> * Unschedulable: false * Lease: * HolderIdentity: containerd-20200724221200-14997 * AcquireTime: <unset> * RenewTime: Fri, 24 Jul 2020 22:54:03 +0000 * Conditions: * Type Status LastHeartbeatTime LastTransitionTime Reason Message * ---- ------ ----------------- ------------------ ------ ------- * MemoryPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available * DiskPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure * PIDPressure False Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:12:43 +0000 KubeletHasSufficientPID kubelet has sufficient PID available * Ready True Fri, 24 Jul 2020 22:49:32 +0000 Fri, 24 Jul 2020 22:13:11 +0000 KubeletReady kubelet is posting ready status * Addresses: * InternalIP: 172.17.0.3 * Hostname: containerd-20200724221200-14997 * Capacity: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * Allocatable: * cpu: 16 * ephemeral-storage: 128884272Ki * hugepages-1Gi: 0 * hugepages-2Mi: 0 * memory: 65817044Ki * pods: 110 * System Info: * Machine ID: 5ea7312d3bbd4189a79e31122cb237a6 * System UUID: 763ff36b-3261-45b1-b62e-092cbae790ce * Boot ID: 65219ec9-ab55-4151-85fa-6cbcd6144529 * Kernel Version: 5.4.0-1022-azure * OS Image: Ubuntu 19.10 * Operating System: linux * Architecture: amd64 * Container Runtime Version: containerd://1.3.3-14-g449e9269 * Kubelet Version: v1.18.3 * Kube-Proxy Version: v1.18.3 * PodCIDR: 10.244.0.0/24 * PodCIDRs: 10.244.0.0/24 * Non-terminated Pods: (11 in total) * Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE * --------- ---- ------------ ---------- --------------- ------------- --- * default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34m * kube-system coredns-66bff467f8-hlk9j 100m (0%) 0 (0%) 70Mi (0%) 170Mi (0%) 40m * kube-system etcd-containerd-20200724221200-14997 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * kube-system kindnet-nsc8k 100m (0%) 100m (0%) 50Mi (0%) 50Mi (0%) 40m * kube-system kube-apiserver-containerd-20200724221200-14997 250m (1%) 0 (0%) 0 (0%) 0 (0%) 22m * kube-system kube-controller-manager-containerd-20200724221200-14997 200m (1%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system kube-proxy-x7fwq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kube-system kube-scheduler-containerd-20200724221200-14997 100m (0%) 0 (0%) 0 (0%) 0 (0%) 41m * kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 40m * kubernetes-dashboard dashboard-metrics-scraper-dc6947fbf-xphhd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * kubernetes-dashboard kubernetes-dashboard-6dbb54fd95-ms9wg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 23m * Allocated resources: * (Total limits may be over 100 percent, i.e., overcommitted.) 
* Resource Requests Limits * -------- -------- ------ * cpu 750m (4%) 100m (0%) * memory 120Mi (0%) 220Mi (0%) * ephemeral-storage 0 (0%) 0 (0%) * hugepages-1Gi 0 (0%) 0 (0%) * hugepages-2Mi 0 (0%) 0 (0%) * Events: * Type Reason Age From Message * ---- ------ ---- ---- ------- * Normal NodeHasNoDiskPressure 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m (x5 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeHasSufficientMemory 41m (x6 over 41m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal Starting 41m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 41m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeHasSufficientMemory 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 41m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Normal NodeAllocatableEnforced 41m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeReady 40m kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeReady * Normal Starting 40m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. * Warning readOnlySysFS 40m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 25m kubelet, containerd-20200724221200-14997 Starting kubelet. * Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 26041 * Warning SystemOOM 25m kubelet, containerd-20200724221200-14997 System OOM encountered, victim process: registry-server, pid: 28567 * Normal NodeAllocatableEnforced 25m kubelet, containerd-20200724221200-14997 Updated Node Allocatable limit across pods * Normal NodeHasSufficientMemory 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientMemory * Normal NodeHasNoDiskPressure 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasNoDiskPressure * Normal NodeHasSufficientPID 25m (x7 over 25m) kubelet, containerd-20200724221200-14997 Node containerd-20200724221200-14997 status is now: NodeHasSufficientPID * Warning readOnlySysFS 24m kube-proxy, containerd-20200724221200-14997 CRI error: /sys is read-only: cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000) * Normal Starting 24m kube-proxy, containerd-20200724221200-14997 Starting kube-proxy. 
* 
* ==> dmesg <==
* [ +0.008042] FS-Cache: N-cookie d=00000000087e516b n=00000000639e5e65
* [ +0.007221] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +0.005631] FS-Cache: Duplicate cookie detected
* [ +0.003841] FS-Cache: O-cookie c=0000000011bf07ab [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.005951] FS-Cache: O-cookie d=00000000087e516b n=00000000251e97b7
* [ +0.004790] FS-Cache: O-key=[8] '56c00f0000000000'
* [ +0.003643] FS-Cache: N-cookie c=00000000e2eec606 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.005689] FS-Cache: N-cookie d=00000000087e516b n=000000007550b1ca
* [ +0.004826] FS-Cache: N-key=[8] '56c00f0000000000'
* [ +1.890005] FS-Cache: Duplicate cookie detected
* [ +0.022632] FS-Cache: O-cookie c=0000000036b15294 [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.008521] FS-Cache: O-cookie d=00000000087e516b n=00000000efd734b0
* [ +0.007450] FS-Cache: O-key=[8] '55c00f0000000000'
* [ +0.004298] FS-Cache: N-cookie c=00000000c6760349 [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.007729] FS-Cache: N-cookie d=00000000087e516b n=000000002b6d28f2
* [ +0.006730] FS-Cache: N-key=[8] '55c00f0000000000'
* [ +2.781512] FS-Cache: Duplicate cookie detected
* [ +0.004691] FS-Cache: O-cookie c=0000000026b60cdb [p=00000000a4eacd33 fl=226 nc=0 na=1]
* [ +0.006817] FS-Cache: O-cookie d=00000000087e516b n=0000000083eaff9d
* [ +0.006146] FS-Cache: O-key=[8] '5ac00f0000000000'
* [ +0.004163] FS-Cache: N-cookie c=000000006a4278df [p=00000000a4eacd33 fl=2 nc=0 na=1]
* [ +0.006241] FS-Cache: N-cookie d=00000000087e516b n=00000000e4c3dd6e
* [ +0.005074] FS-Cache: N-key=[8] '5ac00f0000000000'
* [Jul24 22:02] tee (116897): /proc/108378/oom_adj is deprecated, please use /proc/108378/oom_score_adj instead.
* [Jul24 22:09] hrtimer: interrupt took 1877431 ns
* 
* ==> etcd [e1da8367b2af71bf928910238acc6b877c309718e6be22ad7c222ff9731c93cc] <==
* 2020-07-24 22:29:30.038314 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:263" took too long (102.05731ms) to execute
* 2020-07-24 22:29:30.038475 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-containerd-20200724221200-14997\" " with result "range_response_count:1 size:6560" took too long (102.234322ms) to execute
* 2020-07-24 22:30:00.757431 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/storage-provisioner.1624d0d94aaa7ad0\" " with result "range_response_count:1 size:865" took too long (111.660057ms) to execute
* 2020-07-24 22:31:03.103374 W | wal: sync duration of 4.321780012s, expected less than 1s
* 2020-07-24 22:31:03.104064 W | etcdserver: read-only range request "key:\"/registry/roles\" range_end:\"/registry/rolet\" count_only:true " with result "range_response_count:0 size:7" took too long (4.143856667s) to execute
* 2020-07-24 22:31:03.110173 W | etcdserver: read-only range request "key:\"/registry/csidrivers\" range_end:\"/registry/csidrivert\" count_only:true " with result "range_response_count:0 size:5" took too long (3.893694523s) to execute
* 2020-07-24 22:31:03.110244 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (748.311405ms) to execute
* 2020-07-24 22:31:03.110295 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:257" took too long (2.065079912s) to execute
* 2020-07-24 22:31:27.394849 W | wal: sync duration of 1.161100746s, expected less than 1s
* 2020-07-24 22:31:28.903901 W | etcdserver: read-only range request "key:\"/registry/mutatingwebhookconfigurations\" range_end:\"/registry/mutatingwebhookconfigurationt\" count_only:true " with result "range_response_count:0 size:5" took too long (638.619667ms) to execute
* 2020-07-24 22:31:28.903925 W | etcdserver: read-only range request "key:\"/registry/controllers\" range_end:\"/registry/controllert\" count_only:true " with result "range_response_count:0 size:5" took too long (469.929992ms) to execute
* 2020-07-24 22:39:26.228142 I | mvcc: store.index: compact 1128
* 2020-07-24 22:39:26.229399 I | mvcc: finished scheduled compaction at 1128 (took 884.061µs)
* 2020-07-24 22:44:26.238245 I | mvcc: store.index: compact 1236
* 2020-07-24 22:44:26.238942 I | mvcc: finished scheduled compaction at 1236 (took 365.326µs)
* 2020-07-24 22:49:26.248026 I | mvcc: store.index: compact 1314
* 2020-07-24 22:49:26.248710 I | mvcc: finished scheduled compaction at 1314 (took 339.022µs)
* 2020-07-24 22:51:52.521928 W | etcdserver: request "header: txn: success:> failure: >>" with result "size:16" took too long (902.174992ms) to execute
* 2020-07-24 22:51:52.522127 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (1.329719758s) to execute
* 2020-07-24 22:51:52.522249 W | etcdserver: read-only range request "key:\"/registry/priorityclasses\" range_end:\"/registry/priorityclasset\" count_only:true " with result "range_response_count:0 size:7" took too long (263.931372ms) to execute
* 2020-07-24 22:51:52.522268 W | etcdserver: read-only range request "key:\"/registry/podtemplates\" range_end:\"/registry/podtemplatet\" count_only:true " with result "range_response_count:0 size:5" took too long (651.529003ms) to execute
* 2020-07-24 22:51:52.522324 W | etcdserver: read-only range request "key:\"/registry/csinodes\" range_end:\"/registry/csinodet\" count_only:true " with result "range_response_count:0 size:7" took too long (315.365732ms) to execute
* 2020-07-24 22:51:52.522347 W | etcdserver: read-only range request "key:\"/registry/services/specs\" range_end:\"/registry/services/spect\" count_only:true " with result "range_response_count:0 size:7" took too long (193.248178ms) to execute
* 2020-07-24 22:51:53.489920 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (140.684539ms) to execute
* 2020-07-24 22:51:53.489983 W | etcdserver: read-only range request "key:\"/registry/pods/kubernetes-dashboard/\" range_end:\"/registry/pods/kubernetes-dashboard0\" " with result "range_response_count:2 size:7466" took too long (297.755813ms) to execute
* 
* ==> kernel <==
* 22:54:08 up 1:21, 0 users, load average: 0.29, 0.66, 2.36
* Linux containerd-20200724221200-14997 5.4.0-1022-azure #22-Ubuntu SMP Fri Jul 10 06:14:37 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
* PRETTY_NAME="Ubuntu 19.10"
* 
* ==> kube-apiserver [7dc9f83693a992e6e76f2932a3ce43efa3ffd6bbef97f8a3dab2e9ab04809fda] <==
* I0724 22:29:30.838357 1 storage_scheduling.go:143] all system priority classes are created successfully or already exist.
* W0724 22:29:31.103453 1 lease.go:224] Resetting endpoints for master service "kubernetes" to [172.17.0.3]
* I0724 22:29:31.104915 1 controller.go:606] quota admission added evaluator for: endpoints
* I0724 22:29:31.150383 1 controller.go:606] quota admission added evaluator for: endpointslices.discovery.k8s.io
* I0724 22:29:31.881534 1 controller.go:606] quota admission added evaluator for: leases.coordination.k8s.io
* I0724 22:29:34.181947 1 controller.go:606] quota admission added evaluator for: daemonsets.apps
* I0724 22:29:34.431603 1 controller.go:606] quota admission added evaluator for: serviceaccounts
* I0724 22:29:34.453874 1 controller.go:606] quota admission added evaluator for: deployments.apps
* I0724 22:29:34.700275 1 controller.go:606] quota admission added evaluator for: roles.rbac.authorization.k8s.io
* I0724 22:29:34.711676 1 controller.go:606] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
* I0724 22:30:24.960939 1 controller.go:606] quota admission added evaluator for: replicasets.apps
* I0724 22:31:03.104488 1 trace.go:116] Trace[910046044]: "GuaranteedUpdate etcd3" type:*coordination.Lease (started: 2020-07-24 22:31:01.925257756 +0000 UTC m=+96.221406956) (total time: 1.179189211s):
* Trace[910046044]: [1.179160609s] [1.178557271s] Transaction committed
* I0724 22:31:03.104575 1 trace.go:116] Trace[1472583103]: "Create" url:/api/v1/namespaces/kubernetes-dashboard/events,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:30:58.882075293 +0000 UTC m=+93.178224393) (total time: 4.222377974s):
* Trace[1472583103]: [4.222302769s] [4.222217764s] Object stored in database
* I0724 22:31:03.104626 1 trace.go:116] Trace[118677254]: "Update" url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/containerd-20200724221200-14997,user-agent:kubelet/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:172.17.0.3 (started: 2020-07-24 22:31:01.925076644 +0000 UTC m=+96.221225844) (total time: 1.179528033s):
* Trace[118677254]: [1.17947683s] [1.179338821s] Object stored in database
* I0724 22:31:03.110755 1 trace.go:116] Trace[1968230455]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.18.3 (linux/amd64) kubernetes/2e7996e,client:127.0.0.1 (started: 2020-07-24 22:31:01.044778208 +0000 UTC m=+95.340927408) (total time: 2.065944568s):
* Trace[1968230455]: [2.065912066s] [2.065904566s] About to write a response
* I0724 22:51:52.522679 1 trace.go:116] Trace[642232690]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (started: 2020-07-24 22:51:51.086288237 +0000 UTC m=+1345.382437437) (total time: 1.436354155s):
* Trace[642232690]: [1.436324653s] [1.434510025s] Transaction committed
* I0724 22:51:52.522911 1 trace.go:116] Trace[857693863]: "List etcd3" key:/pods/kubernetes-dashboard,resourceVersion:,limit:0,continue: (started: 2020-07-24 22:51:51.191949067 +0000 UTC m=+1345.488098267) (total time: 1.330928342s):
* Trace[857693863]: [1.330928342s] [1.330928342s] END
* I0724 22:51:52.523386 1 trace.go:116] Trace[1580627152]: "List" url:/api/v1/namespaces/kubernetes-dashboard/pods,user-agent:e2e-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:172.17.0.1 (started: 2020-07-24 22:51:51.191908464 +0000 UTC m=+1345.488057664) (total time: 1.331449678s):
* Trace[1580627152]: [1.331028249s] [1.330994846s] Listing from storage done
* 
* ==> kube-controller-manager [8662707b3b0f7d44ff6c5e080ea0aba7dd34f515065281accd9db2796d1eabbb] <==
* I0724 22:30:25.016265 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"dashboard-metrics-scraper-dc6947fbf", UID:"f7f90ea4-8326-451c-9d3d-7807a3878e9a", APIVersion:"apps/v1", ResourceVersion:"932", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: dashboard-metrics-scraper-dc6947fbf-xphhd
* I0724 22:30:25.047095 1 event.go:278] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kubernetes-dashboard", Name:"kubernetes-dashboard-6dbb54fd95", UID:"7d191e90-0bb9-4f25-b536-3a59e9a338ae", APIVersion:"apps/v1", ResourceVersion:"934", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubernetes-dashboard-6dbb54fd95-ms9wg
* W0724 22:30:25.140679 1 actual_state_of_world.go:506] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="containerd-20200724221200-14997" does not exist
* I0724 22:30:25.167526 1 shared_informer.go:230] Caches are synced for disruption
* I0724 22:30:25.167548 1 disruption.go:339] Sending events to api server.
* I0724 22:30:25.174767 1 shared_informer.go:230] Caches are synced for persistent volume
* I0724 22:30:25.194865 1 shared_informer.go:230] Caches are synced for TTL
* I0724 22:30:25.222168 1 shared_informer.go:230] Caches are synced for node
* I0724 22:30:25.222200 1 range_allocator.go:172] Starting range CIDR allocator
* I0724 22:30:25.222204 1 shared_informer.go:223] Waiting for caches to sync for cidrallocator
* I0724 22:30:25.222209 1 shared_informer.go:230] Caches are synced for cidrallocator
* I0724 22:30:25.225090 1 shared_informer.go:230] Caches are synced for GC
* I0724 22:30:25.407503 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:30:25.410855 1 shared_informer.go:230] Caches are synced for attach detach
* I0724 22:30:25.419771 1 shared_informer.go:230] Caches are synced for taint
* I0724 22:30:25.419834 1 node_lifecycle_controller.go:1433] Initializing eviction metric for zone:
* I0724 22:30:25.419834 1 taint_manager.go:187] Starting NoExecuteTaintManager
* I0724 22:30:25.419876 1 event.go:278] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"containerd-20200724221200-14997", UID:"878dba67-2126-43d2-a5be-2ad809c96173", APIVersion:"v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node containerd-20200724221200-14997 event: Registered Node containerd-20200724221200-14997 in Controller
* W0724 22:30:25.419894 1 node_lifecycle_controller.go:1048] Missing timestamp for Node containerd-20200724221200-14997. Assuming now as a timestamp.
* I0724 22:30:25.419932 1 node_lifecycle_controller.go:1249] Controller detected that zone is now in state Normal.
* I0724 22:30:25.474250 1 shared_informer.go:230] Caches are synced for resource quota
* I0724 22:30:25.474554 1 shared_informer.go:230] Caches are synced for daemon sets
* I0724 22:30:25.475756 1 shared_informer.go:230] Caches are synced for garbage collector
* I0724 22:30:25.513010 1 shared_informer.go:230] Caches are synced for garbage collector
* I0724 22:30:25.513029 1 garbagecollector.go:142] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
* 
* ==> kube-controller-manager [90f7be9dd5648fdf7d6e6f282cc9119322bc28911e75d3b73a928fa1ad2134ec] <==
* I0724 22:29:12.067440 1 serving.go:313] Generated self-signed cert in-memory
* I0724 22:29:12.831319 1 controllermanager.go:161] Version: v1.18.3
* I0724 22:29:12.832693 1 dynamic_cafile_content.go:167] Starting request-header::/var/lib/minikube/certs/front-proxy-ca.crt
* I0724 22:29:12.832741 1 dynamic_cafile_content.go:167] Starting client-ca-bundle::/var/lib/minikube/certs/ca.crt
* I0724 22:29:12.833285 1 secure_serving.go:178] Serving securely on 127.0.0.1:10257
* I0724 22:29:12.833363 1 tlsconfig.go:240] Starting DynamicServingCertificateController
* I0724 22:29:12.833994 1 deprecated_insecure_serving.go:53] Serving insecurely on [::]:10252
* W0724 22:29:12.834610 1 controllermanager.go:612] fetch api resource lists failed, use legacy client builder: Get https://control-plane.minikube.internal:8444/api/v1?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused
* F0724 22:29:22.836138 1 controllermanager.go:230] error building controller context: failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get https://control-plane.minikube.internal:8444/healthz?timeout=32s: dial tcp 172.17.0.3:8444: connect: connection refused
* 
* ==> kube-proxy [cdb7f0919992f1b17fe65623faf7b9984d3edb56993d5621bef544d49a0ce798] <==
* I0724 22:13:19.703119 1 server.go:583] Version: v1.18.3
* I0724 22:13:19.703827 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:13:19.704435 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:13:19.704682 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:13:19.704788 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:13:19.706305 1 config.go:133] Starting endpoints config controller
* I0724 22:13:19.706338 1 config.go:315] Starting service config controller
* I0724 22:13:19.706344 1 shared_informer.go:223] Waiting for caches to sync for endpoints config
* I0724 22:13:19.706365 1 shared_informer.go:223] Waiting for caches to sync for service config
* I0724 22:13:19.806590 1 shared_informer.go:230] Caches are synced for service config
* I0724 22:13:19.806609 1 shared_informer.go:230] Caches are synced for endpoints config
* W0724 22:30:00.451331 1 server_others.go:559] Unknown proxy mode "", assuming iptables proxy
* I0724 22:30:00.459120 1 node.go:136] Successfully retrieved node IP: 172.17.0.3
* I0724 22:30:00.459158 1 server_others.go:186] Using iptables Proxier.
* I0724 22:30:00.459548 1 server.go:583] Version: v1.18.3
* I0724 22:30:00.460146 1 conntrack.go:52] Setting nf_conntrack_max to 524288
* E0724 22:30:00.460599 1 conntrack.go:127] sysfs is not writable: {Device:sysfs Path:/sys Type:sysfs Opts:[ro nosuid nodev noexec relatime] Freq:0 Pass:0} (mount options are [ro nosuid nodev noexec relatime])
* I0724 22:30:00.460736 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
* I0724 22:30:00.460806 1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
* I0724 22:30:00.460979 1 config.go:315] Starting service config controller
* I0724 22:30:00.460995 1 shared_informer.go:223] Waiting for caches to sync for service config
* I0724 22:30:00.461156 1 config.go:133] Starting endpoints config controller
* I0724 22:30:00.462702 1 shared_informer.go:223] Waiting for caches to sync for endpoints config
* I0724 22:30:00.561150 1 shared_informer.go:230] Caches are synced for service config
* I0724 22:30:00.562880 1 shared_informer.go:230] Caches are synced for endpoints config
* 
* ==> kube-scheduler [574a9379a97ce5c734627b9b0340a240dea259b901f1dca96ad8d9978331c503] <==
* E0724 22:28:46.956193 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:47.936021 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:48.615795 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:48.627285 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:48.844520 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:49.268244 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:52.983500 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:54.489861 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:55.798147 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:56.415849 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:57.297889 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:58.395517 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:28:58.615198 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:00.876683 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:01.380532 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:10.017422 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.StorageClass: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:13.820706 1 reflector.go:178] k8s.io/kubernetes/cmd/kube-scheduler/app/server.go:233: Failed to list *v1.Pod: Get https://control-plane.minikube.internal:8444/api/v1/pods?fieldSelector=status.phase%21%3DFailed%2Cstatus.phase%21%3DSucceeded&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:14.849393 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.CSINode: Get https://control-plane.minikube.internal:8444/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:17.299470 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Node: Get https://control-plane.minikube.internal:8444/api/v1/nodes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:17.476542 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolumeClaim: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:17.787962 1 reflector.go:178] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to list *v1.ConfigMap: Get https://control-plane.minikube.internal:8444/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:18.847726 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.Service: Get https://control-plane.minikube.internal:8444/api/v1/services?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:20.242683 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1beta1.PodDisruptionBudget: Get https://control-plane.minikube.internal:8444/apis/policy/v1beta1/poddisruptionbudgets?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* E0724 22:29:25.306254 1 reflector.go:178] k8s.io/client-go/informers/factory.go:135: Failed to list *v1.PersistentVolume: Get https://control-plane.minikube.internal:8444/api/v1/persistentvolumes?limit=500&resourceVersion=0: dial tcp 172.17.0.3:8444: connect: connection refused
* I0724 22:30:02.056615 1 shared_informer.go:230] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
* 
* ==> kubelet <==
* -- Logs begin at Fri 2020-07-24 22:27:54 UTC, end at Fri 2020-07-24 22:54:08 UTC. --
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158803 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158824 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.158890 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"379562d4d181012c170675f941a008c0691dfd635c16e63e4454c1384b1dbe65\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: I0724 22:53:54.338557 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a09e6baaadfd28ee566e88056be5b5b6c39ad7b1953f9bc53a101febc380383d
* Jul 24 22:53:54 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:54.338953 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227288 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227343 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227363 544 kuberuntime_manager.go:727] createPodSandbox for pod "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:53:56 containerd-20200724221200-14997 kubelet[544]: E0724 22:53:56.227421 544 pod_workers.go:191] Error syncing pod 267f98a1-434e-45a2-abda-53ad3bb7bea1 ("kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)"), skipping: failed to "CreatePodSandbox" for "kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kubernetes-dashboard-6dbb54fd95-ms9wg_kubernetes-dashboard(267f98a1-434e-45a2-abda-53ad3bb7bea1)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"84d1b0c841d5797b077ebe499d0687d2c5027bef218423269fb1ab927c4b21d3\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290048 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290106 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290121 544 kuberuntime_manager.go:727] createPodSandbox for pod "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:02 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:02.290172 544 pod_workers.go:191] Error syncing pod 4b662b5f-6e78-48de-818c-81989d7f4ea9 ("busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)"), skipping: failed to "CreatePodSandbox" for "busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)" with CreatePodSandboxError: "CreatePodSandbox for pod \"busybox_default(4b662b5f-6e78-48de-818c-81989d7f4ea9)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"0bfa003ac5bb781640c1be03bc1ad923421f177be599acda227ae251c6acbd1c\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: I0724 22:54:05.338585 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: 00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f
* Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.338926 544 pod_workers.go:191] Error syncing pod 25727a03-b071-4a2c-8dfb-ea2a7038e4cd ("storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 5m0s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(25727a03-b071-4a2c-8dfb-ea2a7038e4cd)"
* Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795728 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795793 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795810 544 kuberuntime_manager.go:727] createPodSandbox for pod "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:05 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:05.795861 544 pod_workers.go:191] Error syncing pod 584286ea-2ddf-4194-8a3c-48d505f7f607 ("coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)"), skipping: failed to "CreatePodSandbox" for "coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)" with CreatePodSandboxError: "CreatePodSandbox for pod \"coredns-66bff467f8-hlk9j_kube-system(584286ea-2ddf-4194-8a3c-48d505f7f607)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"c4ef10da47d31af55d6587ca09517240114b3a041cedd662dd519a57c502f898\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:07 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:07.189984 544 remote_runtime.go:105] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to setup network for sandbox "7c591b75a4f90cc50fb8337ded7e992979267274c708adc157d4d824b8f08ab6": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:07 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:07.190039 544 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "7c591b75a4f90cc50fb8337ded7e992979267274c708adc157d4d824b8f08ab6": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:07 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:07.190055 544 kuberuntime_manager.go:727] createPodSandbox for pod "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" failed: rpc error: code = Unknown desc = failed to setup network for sandbox "7c591b75a4f90cc50fb8337ded7e992979267274c708adc157d4d824b8f08ab6": failed to set bridge addr: could not add IP address to "cni0": permission denied
* Jul 24 22:54:07 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:07.190109 544 pod_workers.go:191] Error syncing pod 02a59124-2c99-4a38-abea-eedfd3e1ba46 ("dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)"), skipping: failed to "CreatePodSandbox" for "dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dashboard-metrics-scraper-dc6947fbf-xphhd_kubernetes-dashboard(02a59124-2c99-4a38-abea-eedfd3e1ba46)\" failed: rpc error: code = Unknown desc = failed to setup network for sandbox \"7c591b75a4f90cc50fb8337ded7e992979267274c708adc157d4d824b8f08ab6\": failed to set bridge addr: could not add IP address to \"cni0\": permission denied"
* Jul 24 22:54:07 containerd-20200724221200-14997 kubelet[544]: I0724 22:54:07.338267 544 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a09e6baaadfd28ee566e88056be5b5b6c39ad7b1953f9bc53a101febc380383d
* Jul 24 22:54:07 containerd-20200724221200-14997 kubelet[544]: E0724 22:54:07.338620 544 pod_workers.go:191] Error syncing pod 562af1c8-c195-4fed-b475-4ff2a3cca8b5 ("kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"), skipping: failed to "StartContainer" for "kindnet-cni" with CrashLoopBackOff: "back-off 5m0s restarting failed container=kindnet-cni pod=kindnet-nsc8k_kube-system(562af1c8-c195-4fed-b475-4ff2a3cca8b5)"
* 
* ==> storage-provisioner [00a604004ac81017478505940a9e65265df2e1d0d95ee5ccee3c95ec0687461f] <==
* F0724 22:53:34.696730 1 main.go:37] Error getting server version: Get https://10.96.0.1:443/version: dial tcp 10.96.0.1:443: getsockopt: no route to host
-- /stdout --
helpers_test.go:247: (dbg) Run: ./minikube-linux-amd64 status --format={{.APIServer}} -p containerd-20200724221200-14997 -n containerd-20200724221200-14997
helpers_test.go:254: (dbg) Run: kubectl --context containerd-20200724221200-14997 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:260: non-running pods: busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg
helpers_test.go:262: ======> post-mortem[TestStartStop/group/containerd/serial/Pause]: describe non-running pods <======
helpers_test.go:265: (dbg) Run: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg
helpers_test.go:265: (dbg) Non-zero exit: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1 (86.960422ms)
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Node: containerd-20200724221200-14997/172.17.0.3
Start Time: Fri, 24 Jul 2020 22:19:40 +0000
Labels: integration-test=busybox
Annotations:
Status: Pending
IP:
IPs:
Containers:
  busybox:
    Container ID:
    Image: busybox:1.28.4-glibc
    Image ID:
    Port:
    Host Port:
    Command: sleep 3600
    State: Waiting
      Reason: ContainerCreating
    Ready: False
    Restart Count: 0
    Environment:
    Mounts: /var/run/secrets/kubernetes.io/serviceaccount from default-token-xmm9f (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  default-token-xmm9f:
    Type: Secret (a volume populated by a Secret)
    SecretName: default-token-xmm9f
    Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
             node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason                  Age                 From                                       Message
  ----     ------                  ----                ----                                       -------
  Normal   Scheduled               34m                 default-scheduler                          Successfully assigned default/busybox to containerd-20200724221200-14997
  Warning  FailedCreatePodSandBox  34m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5311a1ffb81c3ec44164ca704d1b425a50851c7a615951d885f3e261bb56b331": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  34m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e0b3338bd82723225aa39e3a572b31f4b10340fa640d33ac956ec7982b47a365": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "bce48c23f5090e307249f38d5e9c17615b5ce4547b68fd7dd207f5616546b1ff": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c260da116bad44bab092e4453833efc9ce5c3c70209770b30f7aeffed5db766d": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e352d7747cb44ae844f2847c8895d8a03a59a5bd62570299b6f91ba0d2b31e93": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  33m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "7a8e1f3fa957ea5ffbd5b203818f66025f20b18e8ba1398842a4a1dcb4beade1": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "02713a33eaf3192cf9c63e173bd72b67773f28f4dfcef338060dda2a7f8489e1": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "530cf34860dcb75c557c7cbc9a86910a3f4919230e74cdf10126ba1e75f3f49b": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  32m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a33b097bd953e28e4d7499a4b0ca06585fe1b9029b95aac214b0bba904677259": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  28m (x17 over 32m)  kubelet, containerd-20200724221200-14997   (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "47073b588559d0af9dafbb8171df657751c2a35ea5f0466a20f5762627e9cd56": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  24m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "cc2af3d00db0854dd201af575fe5d65f4c2208b59d12cea5f983ef22b810f25d": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  23m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "4cf1a61ad6807315736c91341b7763dc41aca876152d03c2db9814cfa254e7ee": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  23m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "a7cd4b3d30ded88757df1727167c36721efb9fd28978628b3503c0b86fc912e2": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  23m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "14fb4d9bb3714b5e4d1b49fd744c91c0f36ada0ca3c4313f0bb85e74660c9ab1": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  23m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f5f36f4c79df279b40deef49d26e0ef042c075b3ba24396147e670314e61a159": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "8f22e7112e69d6377a16927508a438643f8cea01307d448d397775ae85526176": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c1197cb670c56b20720daecde3535335370ba2b2f2515fa58f3d6b0bbc3e647d": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "21a1ae9d38e4f47d5bd6abf3cd1f776466ad5d6dc481a886df660824479af77e": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  22m                 kubelet, containerd-20200724221200-14997   Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f8e86b484453c0e4d0c68fceed3b3e44519bce50c31c2b6cd480c017ee9d684e": failed to set bridge addr: could not add IP address to "cni0": permission denied
  Warning  FailedCreatePodSandBox  4m4s (x75 over 22m) kubelet, containerd-20200724221200-14997   (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "5ff19316dc062bffc4d2ed705ac7465ced0eb93bb31ece97a9a7976e0e544892": failed to set bridge addr: could not add IP address to "cni0": permission denied
-- /stdout --
** stderr **
Error from server (NotFound): pods "coredns-66bff467f8-hlk9j" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-dc6947fbf-xphhd" not found
Error from server (NotFound): pods "kubernetes-dashboard-6dbb54fd95-ms9wg" not found
** /stderr **
helpers_test.go:267: kubectl --context containerd-20200724221200-14997 describe pod busybox coredns-66bff467f8-hlk9j dashboard-metrics-scraper-dc6947fbf-xphhd kubernetes-dashboard-6dbb54fd95-ms9wg: exit status 1
=== CONT TestStartStop/group/containerd/serial
start_stop_delete_test.go:126: (dbg) Run: ./minikube-linux-amd64 delete -p containerd-20200724221200-14997
start_stop_delete_test.go:126: (dbg) Non-zero exit: ./minikube-linux-amd64 delete -p containerd-20200724221200-14997: context deadline exceeded (1.5µs)
start_stop_delete_test.go:128: failed to clean up: args "./minikube-linux-amd64 delete -p containerd-20200724221200-14997": context deadline exceeded
start_stop_delete_test.go:131: (dbg) Run: kubectl config get-contexts containerd-20200724221200-14997
start_stop_delete_test.go:131: (dbg) Non-zero exit: kubectl config get-contexts containerd-20200724221200-14997: context deadline exceeded (300ns)
start_stop_delete_test.go:133: config context error: context deadline exceeded (may be ok)
start_stop_delete_test.go:136: expected exit code 1, got 0. output:
=== CONT TestStartStop/group/containerd
helpers_test.go:170: Cleaning up "containerd-20200724221200-14997" profile ...
helpers_test.go:171: (dbg) Run: ./minikube-linux-amd64 delete -p containerd-20200724221200-14997
helpers_test.go:171: (dbg) Done: ./minikube-linux-amd64 delete -p containerd-20200724221200-14997: (5.84613721s)
--- FAIL: TestStartStop (3246.90s)
    --- FAIL: TestStartStop/group (0.00s)
        --- PASS: TestStartStop/group/old-k8s-version (341.05s)
            --- PASS: TestStartStop/group/old-k8s-version/serial (340.72s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (142.48s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (11.19s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/Stop (15.93s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.34s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/SecondStart (151.59s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (5.02s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.01s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.38s)
                --- PASS: TestStartStop/group/old-k8s-version/serial/Pause (3.66s)
        --- PASS: TestStartStop/group/embed-certs (168.45s)
            --- PASS: TestStartStop/group/embed-certs/serial (168.15s)
                --- PASS: TestStartStop/group/embed-certs/serial/FirstStart (62.57s)
                --- PASS: TestStartStop/group/embed-certs/serial/DeployApp (9.52s)
                --- PASS: TestStartStop/group/embed-certs/serial/Stop (12.20s)
                --- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.30s)
                --- PASS: TestStartStop/group/embed-certs/serial/SecondStart (53.83s)
                --- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (14.02s)
                --- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.01s)
                --- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.38s)
                --- PASS: TestStartStop/group/embed-certs/serial/Pause (3.54s)
        --- PASS: TestStartStop/group/newest-cni (133.12s)
            --- PASS: TestStartStop/group/newest-cni/serial (132.82s)
                --- PASS: TestStartStop/group/newest-cni/serial/FirstStart (77.23s)
                --- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)
                --- PASS: TestStartStop/group/newest-cni/serial/Stop (2.38s)
                --- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.24s)
                --- PASS: TestStartStop/group/newest-cni/serial/SecondStart (44.24s)
                --- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)
                --- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)
                --- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.90s)
                --- PASS: TestStartStop/group/newest-cni/serial/Pause (3.17s)
        --- FAIL: TestStartStop/group/crio (2571.60s)
            --- FAIL: TestStartStop/group/crio/serial (2564.87s)
                --- FAIL: TestStartStop/group/crio/serial/FirstStart (507.70s)
                --- FAIL: TestStartStop/group/crio/serial/DeployApp (490.30s)
                --- PASS: TestStartStop/group/crio/serial/Stop (21.10s)
                --- PASS: TestStartStop/group/crio/serial/EnableAddonAfterStop (0.27s)
                --- FAIL: TestStartStop/group/crio/serial/SecondStart (452.72s)
                --- FAIL: TestStartStop/group/crio/serial/UserAppExistsAfterStop (542.95s)
                --- FAIL: TestStartStop/group/crio/serial/AddonExistsAfterStop (542.86s)
                --- FAIL: TestStartStop/group/crio/serial/VerifyKubernetesImages (3.47s)
                --- FAIL: TestStartStop/group/crio/serial/Pause (3.51s)
        --- FAIL: TestStartStop/group/containerd (2534.10s)
            --- FAIL: TestStartStop/group/containerd/serial (2528.26s)
                --- FAIL: TestStartStop/group/containerd/serial/FirstStart (459.15s)
                --- FAIL: TestStartStop/group/containerd/serial/DeployApp (486.30s)
                --- PASS: TestStartStop/group/containerd/serial/Stop (1.45s)
                --- PASS: TestStartStop/group/containerd/serial/EnableAddonAfterStop (0.28s)
                --- FAIL: TestStartStop/group/containerd/serial/SecondStart (487.93s)
                --- FAIL: TestStartStop/group/containerd/serial/UserAppExistsAfterStop (543.10s)
                --- FAIL: TestStartStop/group/containerd/serial/AddonExistsAfterStop (542.93s)
                --- FAIL: TestStartStop/group/containerd/serial/VerifyKubernetesImages (3.56s)
                --- FAIL: TestStartStop/group/containerd/serial/Pause (3.55s)
FAIL
Tests completed in 1h18m5.183885789s (result code 1)