diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 0e431541195..533b5bf20c8 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -420,6 +420,7 @@ type PortValues struct {
 type PsValues struct {
     PodmanCommand
     All      bool
+    External bool
     Filter   []string
     Format   string
     Last     int
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index d2c5e19e238..98d2f679ce3 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -120,6 +120,7 @@ func psInit(command *cliconfig.PsValues) {
     command.SetUsageTemplate(UsageTemplate())
     flags := command.Flags()
     flags.BoolVarP(&command.All, "all", "a", false, "Show all the containers, default is only running containers")
+    flags.BoolVar(&command.External, "external", false, "Show containers in storage not controlled by Podman")
     flags.StringSliceVarP(&command.Filter, "filter", "f", []string{}, "Filter output based on conditions given")
     flags.StringVar(&command.Format, "format", "", "Pretty-print containers to JSON or using a Go template")
     flags.IntVarP(&command.Last, "last", "n", -1, "Print the n last created containers (all states)")
@@ -160,7 +161,7 @@ func psCmd(c *cliconfig.PsValues) error {
     if err := checkFlagsPassed(c); err != nil {
         return errors.Wrapf(err, "error with flags passed")
     }
-    if !c.Size {
+    if !c.Size && !c.All && !c.External {
         runtime, err = adapter.GetRuntimeNoStore(getContext(), &c.PodmanCommand)
     } else {
         runtime, err = adapter.GetRuntime(getContext(), &c.PodmanCommand)
     }
@@ -311,14 +312,15 @@ func psDisplay(c *cliconfig.PsValues, runtime *adapter.LocalRuntime) error {
     )
     opts := shared.PsOptions{
         All:       c.All,
+        External:  c.External,
         Format:    c.Format,
         Last:      c.Last,
         Latest:    c.Latest,
+        Namespace: c.Namespace,
         NoTrunc:   c.NoTrunct,
         Pod:       c.Pod,
         Quiet:     c.Quiet,
         Size:      c.Size,
-        Namespace: c.Namespace,
         Sort:      c.Sort,
         Sync:      c.Sync,
     }
diff --git a/cmd/podman/rmi.go b/cmd/podman/rmi.go
index f4ca88ea8ae..563661fed85 100644
--- a/cmd/podman/rmi.go
+++ b/cmd/podman/rmi.go
@@ -71,7 +71,7 @@ func rmiCmd(c *cliconfig.RmiValues) error {
         response, err := runtime.RemoveImage(ctx, img, c.Force)
         if err != nil {
             if errors.Cause(err) == storage.ErrImageUsedByContainer {
-                fmt.Printf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID())
+                fmt.Printf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\nUsing the --force option will remove the container and image, but may cause failures for other dependent systems.\n", img.ID())
             }
             if !adapter.IsImageNotFound(err) {
                 exitCode = 2
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index 5f8df2e1015..1f9a0016fc6 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -36,15 +36,16 @@ const (
 // PsOptions describes the struct being formed for ps.
 type PsOptions struct {
     All       bool
+    External  bool
     Format    string
     Last      int
     Latest    bool
+    Namespace bool
     NoTrunc   bool
     Pod       bool
     Quiet     bool
     Size      bool
     Sort      string
-    Namespace bool
     Sync      bool
 }

@@ -199,6 +200,8 @@ func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions)
         status = "Created"
     case define.ContainerStateRemoving.String():
         status = "Removing"
+    case define.ContainerStateNA.String():
+        status = "NA"
     default:
         status = "Error"
     }
@@ -461,7 +464,35 @@ func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, m
     if err != nil {
         return nil, err
     }
+    storeCtrs, err := r.GetExternalContainers(containers)
+    if err != nil {
+        return nil, err
+    }
+
+    if opts.External {
+        containers = storeCtrs
+    } else {
+        containers = append(containers, storeCtrs...)
+    }
+
+    // GetContainers() filters the Podman containers, but it is called from more
+    // places than ps, so we cannot move the external-container filtering there.
+    // If we found external containers, refilter the combined list here.
+    if len(storeCtrs) > 0 {
+        ctrsFiltered := make([]*libpod.Container, 0, len(containers))
+
+        for _, ctr := range containers {
+            include := true
+            for _, filter := range filterFuncs {
+                include = include && filter(ctr)
+            }
+            if include {
+                ctrsFiltered = append(ctrsFiltered, ctr)
+            }
+        }
+        containers = ctrsFiltered
+    }

     // We only want the last few containers.
     if opts.Last > 0 && opts.Last <= len(containers) {
         return nil, errors.Errorf("--last not yet supported")
@@ -528,7 +559,7 @@ func PBatch(r *libpod.Runtime, containers []*libpod.Container, workers int, opts
     for res := range results {
         // We sort out running vs non-running here to save lots of copying
         // later.
-        if !opts.All && !opts.Latest && opts.Last < 1 {
+        if !opts.All && !opts.External && !opts.Latest && opts.Last < 1 {
             if !res.IsInfra && res.State == define.ContainerStateRunning {
                 psResults = append(psResults, res)
             }
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index ac400a467cc..5bcd14ed015 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -147,6 +147,7 @@ type ContainerStats (

 type PsOpts (
     all: bool,
+    external: ?bool,
     filters: ?[]string,
     last: ?int,
     latest: ?bool,
diff --git a/completions/bash/podman b/completions/bash/podman
index c23d156bcbc..2b4cce6bc16 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -2383,6 +2383,7 @@ _podman_ps() {
   "
   local boolean_options="
   --all -a
+  --external
   --help -h
   --latest -l
   --no-trunc
diff --git a/docs/source/markdown/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md
index 024b85ea524..79499012893 100644
--- a/docs/source/markdown/podman-ps.1.md
+++ b/docs/source/markdown/podman-ps.1.md
@@ -32,12 +32,19 @@ all the containers information. By default it lists:

 **--all**, **-a**

-Show all the containers, default is only running containers
+Show all the containers, including those created by other container technologies such as Buildah and CRI-O. The default displays only running containers created by Podman. Containers not created by Podman have an 'NA' status.

 **--pod**, **-p**

 Display the pods the containers are associated with

+**--external**
+
+Display containers that are not controlled by Podman but are stored in containers storage. These containers
+are generally created via other container technologies such as Buildah or CRI-O and may depend on the
+same container images that Podman is using. These containers are denoted with an 'NA' in the COMMAND and
+STATUS columns of the ps output. These containers are also shown when using the `--all` option.
+
 **--no-trunc**

 Display the extended information
@@ -172,11 +179,18 @@
 CONTAINER ID IMAGE COMMAND CREATED STATUS
 ```

+```
+$ podman ps --external
+CONTAINER ID  IMAGE                             COMMAND  CREATED      STATUS  PORTS  NAMES
+38a8a78596f9  docker.io/library/busybox:latest  NA       2 hours ago  NA             busybox-working-container
+fd7b786b5c32  docker.io/library/alpine:latest   NA       2 hours ago  NA             alpine-working-container
+```
+
 ## ps
 Print a list of containers

 ## SEE ALSO
-podman(1)
+podman(1), buildah(1), crio(8)

 ## HISTORY
 August 2017, Originally compiled by Urvashi Mohnani
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 4918bf57a6f..26d401b036c 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -609,6 +609,12 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
         return define.ErrDBClosed
     }

+    // We're dealing with a non-Podman container; don't
+    // attempt to update it, as that won't go well.
+    if ctr.state.State == define.ContainerStateNA {
+        return nil
+    }
+
     if !ctr.valid {
         return define.ErrCtrRemoved
     }
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 562f783a785..60db2fa3b9e 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -393,6 +393,11 @@ func (c *Container) syncContainer() error {
     if err := c.runtime.state.UpdateContainer(c); err != nil {
         return err
     }
+
+    // This is a non-Podman container; don't sync it, as that won't go well.
+    if c.state.State == define.ContainerStateNA {
+        return nil
+    }
     // If runtime knows about the container, update its status in runtime
     // And then save back to disk
     if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStatePaused) {
diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go
index e7d258e214a..8252275a92e 100644
--- a/libpod/define/containerstate.go
+++ b/libpod/define/containerstate.go
@@ -28,6 +28,10 @@ const (
     // ContainerStateRemoving indicates the container is in the process of
     // being removed.
     ContainerStateRemoving ContainerStatus = iota
+    // ContainerStateNA indicates the container is controlled by c/storage
+    // and not by libpod/Podman. This status should ONLY be used by the
+    // ps command.
+    ContainerStateNA ContainerStatus = iota
 )

 // ContainerStatus returns a string representation for users
@@ -50,6 +54,8 @@ func (t ContainerStatus) String() string {
         return "exited"
     case ContainerStateRemoving:
         return "removing"
+    case ContainerStateNA:
+        return "NA"
     }
     return "bad state"
 }
@@ -74,6 +80,8 @@ func StringToContainerStatus(status string) (ContainerStatus, error) {
         return ContainerStateExited, nil
     case ContainerStateRemoving.String():
         return ContainerStateRemoving, nil
+    case ContainerStateNA.String():
+        return ContainerStateNA, nil
     default:
         return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
     }
diff --git a/libpod/image/image.go b/libpod/image/image.go
index c8583a1c5c4..b5ad1351bc2 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -230,7 +230,7 @@ func (ir *Runtime) GetImagesWithFilters(filters []string) ([]*Image, error) {
 }

 func (i *Image) reloadImage() error {
-    newImage, err := i.imageruntime.getImage(i.ID())
+    newImage, err := i.imageruntime.GetImage(i.ID())
     if err != nil {
         return errors.Wrapf(err, "unable to reload image")
     }
@@ -259,7 +259,7 @@ func (i *Image) getLocalImage() (*storage.Image, error) {
         i.InputName = dest.DockerReference().String()
     }

-    img, err := i.imageruntime.getImage(stripSha256(i.InputName))
+    img, err := i.imageruntime.GetImage(stripSha256(i.InputName))
     if err == nil {
         return img.image, err
     }
@@ -283,7 +283,7 @@ func (i *Image) getLocalImage() (*storage.Image, error) {
     if err != nil {
         return nil, err
     }
-    img, err = i.imageruntime.getImage(ref.String())
+    img, err = i.imageruntime.GetImage(ref.String())
     if err == nil {
         return img.image, err
     }
@@ -457,10 +457,30 @@ func (i *Image) Remove(ctx context.Context, force bool) error {
     return nil
 }

-// getImage retrieves an image matching the given name or hash from system
+// TODO: Rework this method to not require an assembly of the fq name with transport
+/*
+// GetManifest tries to GET an image's manifest; returns nil on success and an error on failure
+func (i *Image) GetManifest() error {
+    pullRef, err := alltransports.ParseImageName(i.assembleFqNameTransport())
+    if err != nil {
+        return errors.Errorf("unable to parse '%s'", i.Names()[0])
+    }
+    imageSource, err := pullRef.NewImageSource(nil)
+    if err != nil {
+        return errors.Wrapf(err, "unable to create new image source")
+    }
+    _, _, err = imageSource.GetManifest(nil)
+    if err == nil {
+        return nil
+    }
+    return err
+}
+*/
+
+// GetImage retrieves an image matching the given name or hash from system
 // storage
 // If no matching image can be found, an error is returned
-func (ir *Runtime) getImage(image string) (*Image, error) {
+func (ir *Runtime) GetImage(image string) (*Image, error) {
     var img *storage.Image
     ref, err := is.Transport.ParseStoreReference(ir.store, image)
     if err == nil {
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index f5be8ed5061..2a3b17701bb 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -118,7 +118,7 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
     for _, p := range pruneImages {
         if err := p.Remove(ctx, true); err != nil {
             if errors.Cause(err) == storage.ErrImageUsedByContainer {
-                logrus.Warnf("Failed to prune image %s as it is in use: %v", p.ID(), err)
+                logrus.Warnf("Failed to prune image %s as it is in use: %v.\nA container associated with containers/storage, i.e. Buildah, CRI-O, etc., may be associated with this image.\nUsing the rmi command with the --force option will remove the container and image, but may cause failures for other dependent systems.", p.ID(), err)
                 continue
             }
             return nil, errors.Wrap(err, "failed to prune image")
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 51efc599602..196c5d368e2 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -837,3 +837,59 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
     }
     return ctrs[lastCreatedIndex], nil
 }
+
+// GetExternalContainers returns a list of containers from containers/storage that
+// are not currently known to Podman. The list of containers passed in contains all
+// of the containers known to Podman from the GetContainers() function.
+func (r *Runtime) GetExternalContainers(ctrs []*Container) ([]*Container, error) {
+    var retCtrs []*Container
+
+    // We only have a store when doing all, storage, or size, so skip this otherwise.
+    if r.store == nil {
+        return retCtrs, nil
+    }
+
+    // Make a map of the libpod containers. If we run across them from
+    // c/storage, we won't grab them in the processing below.
+    libpodCtrs := make(map[string]bool)
+    for _, ctr := range ctrs {
+        libpodCtrs[ctr.ID()] = true
+    }
+
+    storeContainers, err2 := r.store.Containers()
+    if err2 != nil {
+        return ctrs, errors.Wrapf(err2, "error reading list of all containers")
+    }
+    for _, container := range storeContainers {
+        // No need to handle libpod containers here.
+        if _, foundCtr := libpodCtrs[container.ID]; foundCtr {
+            continue
+        }
+        newCtr := new(Container)
+        newCtr.config = new(ContainerConfig)
+        newCtr.state = new(ContainerState)
+
+        name := ""
+        if len(container.Names) > 0 {
+            name = container.Names[0]
+        }
+        myImage, _ := r.ImageRuntime().GetImage(container.ImageID)
+        newCtr.runtime = r
+        newCtr.config.ID = container.ID
+        newCtr.config.Name = name
+        newCtr.config.RootfsImageID = container.ImageID
+        newCtr.config.RootfsImageName = myImage.Names()[0]
+        newCtr.config.Command = []string{"NA"}
+        newCtr.config.CreatedTime = container.Created
+        newCtr.state.State = define.ContainerStateNA
+        lock, err := r.lockManager.AllocateLock()
+        if err != nil {
+            return nil, errors.Wrapf(err, "error allocating lock for container %s", newCtr.config.ID)
+        }
+        newCtr.lock = lock
+        logrus.Debugf("Found non-Podman container name [%v], id [%v] imageID [%v] date [%v]\n", name, container.ID, container.ImageID, container.Created)
+        retCtrs = append(retCtrs, newCtr)
+    }
+
+    return retCtrs, nil
+}
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 36db4af68ab..15a1740e6ef 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -511,16 +511,17 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
     var psContainers []shared.PsContainerOutput
     last := int64(c.Last)
     PsOpts := iopodman.PsOpts{
-        All:     c.All,
-        Filters: &c.Filter,
-        Last:    &last,
-        Latest:  &c.Latest,
-        NoTrunc: &c.NoTrunct,
-        Pod:     &c.Pod,
-        Quiet:   &c.Quiet,
-        Size:    &c.Size,
-        Sort:    &c.Sort,
-        Sync:    &c.Sync,
+        All:      c.All,
+        External: &c.External,
+        Filters:  &c.Filter,
+        Last:     &last,
+        Latest:   &c.Latest,
+        NoTrunc:  &c.NoTrunct,
+        Pod:      &c.Pod,
+        Quiet:    &c.Quiet,
+        Size:     &c.Size,
+        Sort:     &c.Sort,
+        Sync:     &c.Sync,
     }
     containers, err := iopodman.Ps().Call(r.Conn, PsOpts)
     if err != nil {