diff --git a/pkg/authorization/authorizer/subjects_test.go b/pkg/authorization/authorizer/subjects_test.go index 0d0b049f5c4b..c45d6e55bce6 100644 --- a/pkg/authorization/authorizer/subjects_test.go +++ b/pkg/authorization/authorizer/subjects_test.go @@ -50,7 +50,6 @@ func TestSubjects(t *testing.T) { "system:serviceaccount:openshift-infra:deployer-controller", "system:serviceaccount:openshift-infra:template-instance-controller", "system:serviceaccount:openshift-infra:template-instance-controller", - "system:serviceaccount:openshift-infra:build-pod-controller", "system:serviceaccount:openshift-infra:build-controller", ), expectedGroups: sets.NewString("RootUsers", "system:cluster-admins", "system:cluster-readers", "system:masters", "system:nodes"), diff --git a/pkg/build/api/types.go b/pkg/build/api/types.go index f77e17e4c64b..3f40b375aae1 100644 --- a/pkg/build/api/types.go +++ b/pkg/build/api/types.go @@ -475,6 +475,14 @@ const ( // build pod but one was already present. StatusReasonBuildPodExists StatusReason = "BuildPodExists" + // StatusReasonNoBuildContainerStatus indicates that the build failed because the + // build pod has no container statuses. + StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus" + + // StatusReasonFailedContainer indicates that the pod for the build has at least + // one container with a non-zero exit status. + StatusReasonFailedContainer StatusReason = "FailedContainer" + // StatusReasonGenericBuildFailed is the reason associated with a broad // range of build failures. StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed" @@ -497,6 +505,8 @@ const ( StatusMessageCancelledBuild = "The build was cancelled by the user." StatusMessageDockerBuildFailed = "Docker build strategy has failed." StatusMessageBuildPodExists = "The pod for this build already exists and is older than the build." + StatusMessageNoBuildContainerStatus = "The pod for this build has no container statuses indicating success or failure." + StatusMessageFailedContainer = "The pod for this build has at least one container with a non-zero exit status." StatusMessageGenericBuildFailed = "Generic Build failure - check logs for details." ) diff --git a/pkg/build/api/validation/validation.go b/pkg/build/api/validation/validation.go index 828c32251532..e26b93f8f6c6 100644 --- a/pkg/build/api/validation/validation.go +++ b/pkg/build/api/validation/validation.go @@ -9,7 +9,6 @@ import ( "github.com/golang/glog" - "github.com/openshift/origin/pkg/util/labelselector" kpath "k8s.io/apimachinery/pkg/api/validation/path" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -23,6 +22,7 @@ import ( buildutil "github.com/openshift/origin/pkg/build/util" imageapi "github.com/openshift/origin/pkg/image/api" imageapivalidation "github.com/openshift/origin/pkg/image/api/validation" + "github.com/openshift/origin/pkg/util/labelselector" ) // ValidateBuild tests required fields for a Build.
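Reviewer aside: a minimal, self-contained sketch of how the two new status reasons added above are meant to pair with container state. The containerStatus type and classify helper here are simplified stand-ins invented for illustration, not part of this patch; the real mapping lives in handleActiveBuild in build_controller.go further down.

package main

import "fmt"

// containerStatus is a simplified stand-in for kapi.ContainerStatus.
type containerStatus struct {
	name       string
	terminated bool
	exitCode   int32
}

// classify mirrors the intended use of the new constants: a succeeded pod with no
// container statuses at all is itself an error, and any terminated container with a
// non-zero exit code fails the build.
func classify(statuses []containerStatus) (reason, message string) {
	if len(statuses) == 0 {
		return "NoBuildContainerStatus", "The pod for this build has no container statuses indicating success or failure."
	}
	for _, s := range statuses {
		if s.terminated && s.exitCode != 0 {
			return "FailedContainer", "The pod for this build has at least one container with a non-zero exit status."
		}
	}
	return "", ""
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify([]containerStatus{{name: "docker-build", terminated: true, exitCode: 1}}))
}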
@@ -81,26 +81,6 @@ func ValidateBuildUpdate(build *buildapi.Build, older *buildapi.Build) field.Err return allErrs } -func diffBuildSpec(newer buildapi.BuildSpec, older buildapi.BuildSpec) (string, error) { - codec := kapi.Codecs.LegacyCodec(v1.LegacySchemeGroupVersion) - newerObj := &buildapi.Build{Spec: newer} - olderObj := &buildapi.Build{Spec: older} - - newerJSON, err := runtime.Encode(codec, newerObj) - if err != nil { - return "", fmt.Errorf("error encoding newer: %v", err) - } - olderJSON, err := runtime.Encode(codec, olderObj) - if err != nil { - return "", fmt.Errorf("error encoding older: %v", err) - } - patch, err := strategicpatch.CreateTwoWayMergePatch(olderJSON, newerJSON, &v1.Build{}) - if err != nil { - return "", fmt.Errorf("error creating a strategic patch: %v", err) - } - return string(patch), nil -} - // refKey returns a key for the given ObjectReference. If the ObjectReference // doesn't include a namespace, the passed in namespace is used for the reference func refKey(namespace string, ref *kapi.ObjectReference) string { @@ -752,3 +732,56 @@ func ValidateNodeSelector(nodeSelector map[string]string, fldPath *field.Path) f } return allErrs } + +func diffBuildSpec(newer, older buildapi.BuildSpec) (string, error) { + newerObj := &buildapi.Build{Spec: newer} + olderObj := &buildapi.Build{Spec: older} + diffBytes, err := CreateBuildPatch(olderObj, newerObj) + if err != nil { + return "", err + } + return string(diffBytes), nil +} + +func CreateBuildPatch(older, newer *buildapi.Build) ([]byte, error) { + codec := kapi.Codecs.LegacyCodec(v1.LegacySchemeGroupVersion) + + newerJSON, err := runtime.Encode(codec, newer) + if err != nil { + return nil, fmt.Errorf("error encoding newer: %v", err) + } + olderJSON, err := runtime.Encode(codec, older) + if err != nil { + return nil, fmt.Errorf("error encoding older: %v", err) + } + patch, err := strategicpatch.CreateTwoWayMergePatch(olderJSON, newerJSON, &v1.Build{}) + if err != nil { + return nil, fmt.Errorf("error creating a strategic patch: %v", err) + } + return patch, nil +} + +func ApplyBuildPatch(build *buildapi.Build, patch []byte) (*buildapi.Build, error) { + codec := kapi.Codecs.LegacyCodec(v1.LegacySchemeGroupVersion) + versionedBuild, err := kapi.Scheme.ConvertToVersion(build, v1.SchemeGroupVersion) + if err != nil { + return nil, err + } + buildJSON, err := runtime.Encode(codec, versionedBuild) + if err != nil { + return nil, err + } + patchedJSON, err := strategicpatch.StrategicMergePatch(buildJSON, patch, &v1.Build{}) + if err != nil { + return nil, err + } + patchedVersionedBuild, err := runtime.Decode(codec, patchedJSON) + if err != nil { + return nil, err + } + patchedBuild, err := kapi.Scheme.ConvertToVersion(patchedVersionedBuild, buildapi.SchemeGroupVersion) + if err != nil { + return nil, err + } + return patchedBuild.(*buildapi.Build), nil +} diff --git a/pkg/build/client/cache/buildconfigs.go b/pkg/build/client/cache/buildconfigs.go new file mode 100644 index 000000000000..c77532f73499 --- /dev/null +++ b/pkg/build/client/cache/buildconfigs.go @@ -0,0 +1,25 @@ +package cache + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildapi "github.com/openshift/origin/pkg/build/api" + cacheclient "github.com/openshift/origin/pkg/client/cache" +) + +// NewBuildConfigGetter returns an object that implements the buildclient BuildConfigGetter interface +// using a StoreToBuildConfigLister +func NewBuildConfigGetter(lister cacheclient.StoreToBuildConfigLister) *buildConfigGetter { + return 
&buildConfigGetter{ + lister: lister, + } +} + +type buildConfigGetter struct { + lister cacheclient.StoreToBuildConfigLister +} + +// Get retrieves a buildconfig from the cache +func (g *buildConfigGetter) Get(namespace, name string, options metav1.GetOptions) (*buildapi.BuildConfig, error) { + return g.lister.BuildConfigs(namespace).Get(name, options) +} diff --git a/pkg/build/client/cache/builds.go b/pkg/build/client/cache/builds.go new file mode 100644 index 000000000000..e20d0bdffa9b --- /dev/null +++ b/pkg/build/client/cache/builds.go @@ -0,0 +1,49 @@ +package cache + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + buildapi "github.com/openshift/origin/pkg/build/api" + cacheclient "github.com/openshift/origin/pkg/client/cache" +) + +// NewBuildLister returns an object that implements the buildclient BuildLister interface +// using a StoreToBuildLister +func NewBuildLister(lister *cacheclient.StoreToBuildLister) *buildLister { + return &buildLister{ + lister: lister, + } +} + +type buildLister struct { + lister *cacheclient.StoreToBuildLister +} + +// List returns a BuildList with the given namespace and get options. Only the LabelSelector +// from the ListOptions is honored. +func (l *buildLister) List(namespace string, opts metav1.ListOptions) (*buildapi.BuildList, error) { + selector, err := labels.Parse(opts.LabelSelector) + if err != nil { + return nil, fmt.Errorf("invalid label selector %q: %v", opts.LabelSelector, err) + } + builds, err := l.lister.Builds(namespace).List(selector) + if err != nil { + return nil, err + } + return buildList(builds), nil +} + +func buildList(builds []*buildapi.Build) *buildapi.BuildList { + items := []buildapi.Build{} + for _, b := range builds { + if b != nil { + items = append(items, *b) + } + } + return &buildapi.BuildList{ + Items: items, + } +} diff --git a/pkg/build/client/clients.go b/pkg/build/client/clients.go index 0d48cbb140a5..1bf2145f5988 100644 --- a/pkg/build/client/clients.go +++ b/pkg/build/client/clients.go @@ -1,9 +1,11 @@ package client import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + buildapi "github.com/openshift/origin/pkg/build/api" osclient "github.com/openshift/origin/pkg/client" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // BuildConfigGetter provides methods for getting BuildConfigs @@ -42,11 +44,21 @@ type BuildUpdater interface { Update(namespace string, build *buildapi.Build) error } +type BuildPatcher interface { + Patch(namespace, name string, patch []byte) (*buildapi.Build, error) +} + // BuildLister provides methods for listing the Builds. type BuildLister interface { List(namespace string, opts metav1.ListOptions) (*buildapi.BuildList, error) } +// BuildDeleter knows how to delete builds from OpenShift. +type BuildDeleter interface { + // DeleteBuild removes the build from OpenShift's storage. + DeleteBuild(build *buildapi.Build) error +} + // OSClientBuildClient delegates build create and update operations to the OpenShift client interface type OSClientBuildClient struct { Client osclient.Interface @@ -63,11 +75,21 @@ func (c OSClientBuildClient) Update(namespace string, build *buildapi.Build) err return e } +// Patch patches builds using the OpenShift client. +func (c OSClientBuildClient) Patch(namespace, name string, patch []byte) (*buildapi.Build, error) { + return c.Client.Builds(namespace).Patch(name, types.StrategicMergePatchType, patch) +} + // List lists the builds using the OpenShift client. 
func (c OSClientBuildClient) List(namespace string, opts metav1.ListOptions) (*buildapi.BuildList, error) { return c.Client.Builds(namespace).List(opts) } +// DeleteBuild deletes a build from OpenShift. +func (c OSClientBuildClient) DeleteBuild(build *buildapi.Build) error { + return c.Client.Builds(build.Namespace).Delete(build.Name) +} + // BuildCloner provides methods for cloning builds type BuildCloner interface { Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) @@ -88,29 +110,6 @@ func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.Bui return c.Client.Builds(namespace).Clone(request) } -// BuildDeleter knows how to delete builds from OpenShift. -type BuildDeleter interface { - // DeleteBuild removes the build from OpenShift's storage. - DeleteBuild(build *buildapi.Build) error -} - -// buildDeleter removes a build from OpenShift. -type buildDeleter struct { - builds osclient.BuildsNamespacer -} - -// NewBuildDeleter creates a new buildDeleter. -func NewBuildDeleter(builds osclient.BuildsNamespacer) BuildDeleter { - return &buildDeleter{ - builds: builds, - } -} - -// DeleteBuild deletes a build from OpenShift. -func (p *buildDeleter) DeleteBuild(build *buildapi.Build) error { - return p.builds.Builds(build.Namespace).Delete(build.Name) -} - // BuildConfigInstantiator provides methods for instantiating builds from build configs type BuildConfigInstantiator interface { Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) diff --git a/pkg/build/controller/build/build_controller.go b/pkg/build/controller/build/build_controller.go new file mode 100644 index 000000000000..12e686b4c103 --- /dev/null +++ b/pkg/build/controller/build/build_controller.go @@ -0,0 +1,828 @@ +package build + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + clientv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + kapi "k8s.io/kubernetes/pkg/api" + kexternalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + kcoreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion" + internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" + kcontroller "k8s.io/kubernetes/pkg/controller" + + builddefaults "github.com/openshift/origin/pkg/build/admission/defaults" + buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides" + buildapi "github.com/openshift/origin/pkg/build/api" + "github.com/openshift/origin/pkg/build/api/validation" + buildclient "github.com/openshift/origin/pkg/build/client" + buildcacheclient "github.com/openshift/origin/pkg/build/client/cache" + "github.com/openshift/origin/pkg/build/controller/common" + "github.com/openshift/origin/pkg/build/controller/policy" + "github.com/openshift/origin/pkg/build/controller/strategy" + buildutil "github.com/openshift/origin/pkg/build/util" + osclient "github.com/openshift/origin/pkg/client" + oscache "github.com/openshift/origin/pkg/client/cache" + 
"github.com/openshift/origin/pkg/controller/shared" + imageapi "github.com/openshift/origin/pkg/image/api" + imageinformers "github.com/openshift/origin/pkg/image/generated/informers/internalversion/image/internalversion" + imagelisters "github.com/openshift/origin/pkg/image/generated/listers/image/internalversion" +) + +const ( + maxRetries = 15 +) + +// BuildController watches builds and synchronizes them with their +// corresponding build pods +type BuildController struct { + buildPatcher buildclient.BuildPatcher + buildLister buildclient.BuildLister + buildConfigGetter buildclient.BuildConfigGetter + buildDeleter buildclient.BuildDeleter + podClient kcoreclient.PodsGetter + + queue workqueue.RateLimitingInterface + + buildStore *oscache.StoreToBuildLister + secretStore internalversion.SecretLister + podStore internalversion.PodLister + imageStreamStore imagelisters.ImageStreamLister + + podInformer cache.SharedIndexInformer + buildInformer cache.SharedIndexInformer + + buildStoreSynced func() bool + podStoreSynced func() bool + secretStoreSynced func() bool + imageStreamStoreSynced func() bool + + runPolicies []policy.RunPolicy + createStrategy buildPodCreationStrategy + buildDefaults builddefaults.BuildDefaults + buildOverrides buildoverrides.BuildOverrides + + recorder record.EventRecorder +} + +// BuildControllerParams is the set of parameters needed to +// create a new BuildController +type BuildControllerParams struct { + BuildInformer shared.BuildInformer + BuildConfigInformer shared.BuildConfigInformer + ImageStreamInformer imageinformers.ImageStreamInformer + PodInformer kcoreinformers.PodInformer + SecretInformer kcoreinformers.SecretInformer + KubeClientInternal kclientset.Interface + KubeClientExternal kexternalclientset.Interface + OpenshiftClient osclient.Interface + DockerBuildStrategy *strategy.DockerBuildStrategy + SourceBuildStrategy *strategy.SourceBuildStrategy + CustomBuildStrategy *strategy.CustomBuildStrategy + BuildDefaults builddefaults.BuildDefaults + BuildOverrides buildoverrides.BuildOverrides +} + +// NewBuildController creates a new BuildController. 
+func NewBuildController(params *BuildControllerParams) *BuildController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(params.KubeClientExternal.Core().RESTClient()).Events("")}) + + buildClient := buildclient.NewOSClientBuildClient(params.OpenshiftClient) + // TODO: Switch to using the cache build lister when we figure out + // what is wrong with retrieving by index + // buildLister := buildcacheclient.NewBuildLister(params.BuildInformer.Lister()) + _ = buildcacheclient.NewBuildLister(params.BuildInformer.Lister()) + buildLister := buildClient + buildConfigGetter := buildcacheclient.NewBuildConfigGetter(params.BuildConfigInformer.Lister()) + c := &BuildController{ + buildPatcher: buildClient, + buildLister: buildLister, + buildConfigGetter: buildConfigGetter, + buildDeleter: buildClient, + secretStore: params.SecretInformer.Lister(), + podClient: params.KubeClientInternal.Core(), + podInformer: params.PodInformer.Informer(), + podStore: params.PodInformer.Lister(), + buildInformer: params.BuildInformer.Informer(), + buildStore: params.BuildInformer.Lister(), + imageStreamStore: params.ImageStreamInformer.Lister(), + createStrategy: &typeBasedFactoryStrategy{ + dockerBuildStrategy: params.DockerBuildStrategy, + sourceBuildStrategy: params.SourceBuildStrategy, + customBuildStrategy: params.CustomBuildStrategy, + }, + buildDefaults: params.BuildDefaults, + buildOverrides: params.BuildOverrides, + + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + recorder: eventBroadcaster.NewRecorder(kapi.Scheme, clientv1.EventSource{Component: "build-controller"}), + runPolicies: policy.GetAllRunPolicies(buildClient, buildClient), + } + + c.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: c.podUpdated, + DeleteFunc: c.podDeleted, + }) + c.buildInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.buildAdded, + UpdateFunc: c.buildUpdated, + }) + + c.buildStoreSynced = c.buildInformer.HasSynced + c.podStoreSynced = c.podInformer.HasSynced + c.secretStoreSynced = params.SecretInformer.Informer().HasSynced + c.imageStreamStoreSynced = params.ImageStreamInformer.Informer().HasSynced + + return c +} + +// Run begins watching and syncing. +func (c *BuildController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + // Wait for the controller stores to sync before starting any work in this controller. 
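+ // If the caches fail to sync before stopCh is closed, log an error and return without starting any workers.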
+ if !cache.WaitForCacheSync(stopCh, c.buildStoreSynced, c.podStoreSynced, c.secretStoreSynced, c.imageStreamStoreSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return + } + + glog.Infof("Starting build controller") + + for i := 0; i < workers; i++ { + go wait.Until(c.worker, time.Second, stopCh) + } + + <-stopCh + glog.Infof("Shutting down build controller") +} + +func (bc *BuildController) worker() { + for { + if quit := bc.work(); quit { + return + } + } +} + +// work gets the next build from the queue and invokes handleBuild on it +func (bc *BuildController) work() bool { + key, quit := bc.queue.Get() + if quit { + return true + } + + defer bc.queue.Done(key) + + build, err := bc.getBuildByKey(key.(string)) + if err != nil { + bc.handleError(err, key) + return false + } + if build == nil { + return false + } + + err = bc.handleBuild(build) + + bc.handleError(err, key) + return false +} + +// handleBuild retrieves the build's corresponding pod and calls the appropriate +// handle function based on the build's current state. Each handler returns a buildUpdate +// object that includes any updates that need to be made on the build. +func (bc *BuildController) handleBuild(build *buildapi.Build) error { + if shouldIgnore(build) { + return nil + } + + glog.V(4).Infof("Handling build %s", buildDesc(build)) + + pod, podErr := bc.podStore.Pods(build.Namespace).Get(buildapi.GetBuildPodName(build)) + + // Technically the only error that is returned from retrieving the pod is the + // NotFound error so this check should not be needed, but leaving here in case + // that changes in the future. + if podErr != nil && !errors.IsNotFound(podErr) { + return podErr + } + + var update *buildUpdate + var err, updateErr error + + switch { + case shouldCancel(build): + update, err = bc.cancelBuild(build) + case build.Status.Phase == buildapi.BuildPhaseNew: + update, err = bc.handleNewBuild(build, pod) + case build.Status.Phase == buildapi.BuildPhasePending, + build.Status.Phase == buildapi.BuildPhaseRunning: + update, err = bc.handleActiveBuild(build, pod) + case buildutil.IsBuildComplete(build): + update, err = bc.handleCompletedBuild(build, pod) + } + if update != nil && !update.isEmpty() { + updateErr = bc.updateBuild(build, update, pod) + } + if err != nil { + return err + } + if updateErr != nil { + return updateErr + } + return nil +} + +// shouldIgnore returns true if a build should be ignored by the controller. +// These include pipeline builds as well as builds that are in a terminal state. +// However if the build is either complete or failed and its completion timestamp +// has not been set, then it returns false so that the build's completion timestamp +// gets updated. +func shouldIgnore(build *buildapi.Build) bool { + // If pipeline build, do nothing. + // These builds are processed/updated/etc by the jenkins sync plugin + if build.Spec.Strategy.JenkinsPipelineStrategy != nil { + glog.V(4).Infof("Ignoring build %s with jenkins pipeline strategy", buildDesc(build)) + return true + } + + // If a build is in a terminal state, ignore it; unless it is in a succeeded or failed + // state and its completion time is not set, then we should at least attempt to set its + // completion time if possible. 
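+ // In practice only Complete or Failed builds that are missing a completion timestamp fall through to handleCompletedBuild; all other terminal builds are skipped here.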
+ if buildutil.IsBuildComplete(build) { + switch build.Status.Phase { + case buildapi.BuildPhaseComplete, + buildapi.BuildPhaseFailed: + if build.Status.CompletionTimestamp == nil { + return false + } + } + glog.V(4).Infof("Ignoring build %s in completed state", buildDesc(build)) + return true + } + + return false +} + +// shouldCancel returns true if a build is active and its cancellation flag is set +func shouldCancel(build *buildapi.Build) bool { + return !buildutil.IsBuildComplete(build) && build.Status.Cancelled +} + +// cancelBuild deletes a build pod and returns an update to mark the build as cancelled +func (bc *BuildController) cancelBuild(build *buildapi.Build) (*buildUpdate, error) { + glog.V(4).Infof("Cancelling build %s", buildDesc(build)) + + podName := buildapi.GetBuildPodName(build) + err := bc.podClient.Pods(build.Namespace).Delete(podName, &metav1.DeleteOptions{}) + if err != nil && !errors.IsNotFound(err) { + return nil, fmt.Errorf("could not delete build pod %s/%s to cancel build %s: %v", build.Namespace, podName, buildDesc(build), err) + } + + return transitionToPhase(buildapi.BuildPhaseCancelled, buildapi.StatusReasonCancelledBuild, buildapi.StatusMessageCancelledBuild), nil +} + +// handleNewBuild will check whether policy allows running the new build and if so, creates a pod +// for the build and returns an update to move it to the Pending phase +func (bc *BuildController) handleNewBuild(build *buildapi.Build, pod *kapi.Pod) (*buildUpdate, error) { + // If a pod was found, and it was created after the build was created, it + // means that the build is active and its status should be updated + if pod != nil { + //TODO: Use a better way to determine whether the pod corresponds to the build (maybe using the owner field) + if !pod.CreationTimestamp.Before(build.CreationTimestamp) { + return bc.handleActiveBuild(build, pod) + } + // If a pod was created before the current build, move the build to error + return transitionToPhase(buildapi.BuildPhaseError, buildapi.StatusReasonBuildPodExists, buildapi.StatusMessageBuildPodExists), nil + } + + runPolicy := policy.ForBuild(build, bc.runPolicies) + if runPolicy == nil { + return nil, fmt.Errorf("unable to determine build policy for %s", buildDesc(build)) + } + + // The runPolicy decides whether to execute this build or not. + if run, err := runPolicy.IsRunnable(build); err != nil || !run { + return nil, err + } + + return bc.createBuildPod(build) +} + +// createPodSpec creates a pod spec for the given build +func (bc *BuildController) createPodSpec(originalBuild *buildapi.Build, ref string) (*kapi.Pod, error) { + // TODO(rhcarvalho) + // The S2I and Docker builders expect build.Spec.Output.To to contain a + // resolved reference to a Docker image. Since build.Spec is immutable, we + // change a copy (that is never persisted) and pass it to + // createStrategy.createBuildPod. We should make the builders use + // build.Status.OutputDockerImageReference, which will make copying the build + // unnecessary. + build, err := buildutil.BuildDeepCopy(originalBuild) + if err != nil { + return nil, fmt.Errorf("unable to copy build %s: %v", buildDesc(originalBuild), err) + } + + build.Status.OutputDockerImageReference = ref + if build.Spec.Output.To != nil && len(build.Spec.Output.To.Name) != 0 { + build.Spec.Output.To = &kapi.ObjectReference{ + Kind: "DockerImage", + Name: ref, + } + } + + // Invoke the strategy to create a build pod. 
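+ // createStrategy is the typeBasedFactoryStrategy wired up in NewBuildController with the Docker, Source, and Custom build strategies.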
+ podSpec, err := bc.createStrategy.CreateBuildPod(build) + if err != nil { + if strategy.IsFatal(err) { + return nil, &strategy.FatalError{Reason: fmt.Sprintf("failed to create a build pod spec for build %s/%s: %v", build.Namespace, build.Name, err)} + } + return nil, fmt.Errorf("failed to create a build pod spec for build %s/%s: %v", build.Namespace, build.Name, err) + } + if err := bc.buildDefaults.ApplyDefaults(podSpec); err != nil { + return nil, fmt.Errorf("failed to apply build defaults for build %s/%s: %v", build.Namespace, build.Name, err) + } + if err := bc.buildOverrides.ApplyOverrides(podSpec); err != nil { + return nil, fmt.Errorf("failed to apply build overrides for build %s/%s: %v", build.Namespace, build.Name, err) + } + return podSpec, nil +} + +// resolveOutputDockerImageReference returns a reference to a Docker image +// computed from the build.Spec.Output.To reference. +func (bc *BuildController) resolveOutputDockerImageReference(build *buildapi.Build) (string, error) { + outputTo := build.Spec.Output.To + if outputTo == nil || outputTo.Name == "" { + return "", nil + } + var ref string + switch outputTo.Kind { + case "DockerImage": + ref = outputTo.Name + case "ImageStream", "ImageStreamTag": + // TODO(smarterclayton): security, ensure that the reference image stream is actually visible + namespace := outputTo.Namespace + if len(namespace) == 0 { + namespace = build.Namespace + } + + var tag string + streamName := outputTo.Name + if outputTo.Kind == "ImageStreamTag" { + var ok bool + streamName, tag, ok = imageapi.SplitImageStreamTag(streamName) + if !ok { + return "", fmt.Errorf("the referenced image stream tag is invalid: %s", outputTo.Name) + } + tag = ":" + tag + } + stream, err := bc.imageStreamStore.ImageStreams(namespace).Get(streamName) + if err != nil { + if errors.IsNotFound(err) { + return "", fmt.Errorf("the referenced output image stream %s/%s does not exist", namespace, streamName) + } + return "", fmt.Errorf("the referenced output image stream %s/%s could not be found by build %s/%s: %v", namespace, streamName, build.Namespace, build.Name, err) + } + if len(stream.Status.DockerImageRepository) == 0 { + e := fmt.Errorf("the image stream %s/%s cannot be used as the output for build %s/%s because the integrated Docker registry is not configured and no external registry was defined", namespace, outputTo.Name, build.Namespace, build.Name) + bc.recorder.Eventf(build, kapi.EventTypeWarning, "invalidOutput", "Error starting build: %v", e) + return "", e + } + ref = fmt.Sprintf("%s%s", stream.Status.DockerImageRepository, tag) + } + return ref, nil +} + +// createBuildPod creates a new pod to run a build +func (bc *BuildController) createBuildPod(build *buildapi.Build) (*buildUpdate, error) { + + update := &buildUpdate{} + + // Set the output Docker image reference. + ref, err := bc.resolveOutputDockerImageReference(build) + if err != nil { + // If we cannot resolve the output reference, the output image stream + // may not yet exist. The build should remain in the new state and show the + // reason that it is still in the new state. + update.setReason(buildapi.StatusReasonInvalidOutputReference) + update.setMessage(buildapi.StatusMessageInvalidOutputRef) + return update, err + } + + // Create the build pod spec + buildPod, err := bc.createPodSpec(build, ref) + if err != nil { + // If an error occurred when creating the pod spec, it likely means + // that the build is something we don't understand.
For example, it could + // have a strategy that we don't recognize. It will remain in New state + // and be updated with the reason that it is still in New + update.setReason(buildapi.StatusReasonCannotCreateBuildPodSpec) + update.setMessage(buildapi.StatusMessageCannotCreateBuildPodSpec) + + // The error will be logged, but will not be returned to the caller + // to be retried. The reason is that there's really no external factor + // that could cause the pod creation to fail; therefore no reason + // to immediately retry processing the build. + // + // A scenario where this would happen is that we've introduced a + // new build strategy in the master, but the old version of the controller + // is still running. We don't want the old controller to move the + // build to the error phase and we don't want it to keep actively retrying. + utilruntime.HandleError(err) + return update, nil + } + + glog.V(4).Infof("Pod %s/%s for build %s is about to be created", build.Namespace, buildPod.Name, buildDesc(build)) + if _, err := bc.podClient.Pods(build.Namespace).Create(buildPod); err != nil { + if errors.IsAlreadyExists(err) { + bc.recorder.Eventf(build, kapi.EventTypeWarning, "FailedCreate", "Pod already exists: %s/%s", buildPod.Namespace, buildPod.Name) + glog.V(4).Infof("Build pod %s/%s for build %s already exists", build.Namespace, buildPod.Name, buildDesc(build)) + + // If the existing pod was created before this build, switch to the Error state. + existingPod, err := bc.podClient.Pods(build.Namespace).Get(buildPod.Name, metav1.GetOptions{}) + if err == nil && existingPod.CreationTimestamp.Before(build.CreationTimestamp) { + update = transitionToPhase(buildapi.BuildPhaseError, buildapi.StatusReasonBuildPodExists, buildapi.StatusMessageBuildPodExists) + return update, nil + } + return nil, nil + } + // Log an event if the pod is not created (most likely due to quota denial). + bc.recorder.Eventf(build, kapi.EventTypeWarning, "FailedCreate", "Error creating: %v", err) + update.setReason(buildapi.StatusReasonCannotCreateBuildPod) + update.setMessage(buildapi.StatusMessageCannotCreateBuildPod) + return update, fmt.Errorf("failed to create build pod: %v", err) + } + glog.V(4).Infof("Created pod %s/%s for build %s", build.Namespace, buildPod.Name, buildDesc(build)) + update = transitionToPhase(buildapi.BuildPhasePending, "", "") + update.setPodNameAnnotation(buildPod.Name) + update.setOutputRef(ref) + return update, nil +} + +// handleActiveBuild handles a build in either pending or running state +func (bc *BuildController) handleActiveBuild(build *buildapi.Build, pod *kapi.Pod) (*buildUpdate, error) { + + if pod == nil { + pod = bc.findMissingPod(build) + if pod == nil { + glog.V(4).Infof("Failed to find the build pod for build %s. 
Moving it to Error state", buildDesc(build)) + return transitionToPhase(buildapi.BuildPhaseError, buildapi.StatusReasonBuildPodDeleted, buildapi.StatusMessageBuildPodDeleted), nil + } + } + + var update *buildUpdate + switch pod.Status.Phase { + case kapi.PodPending: + if build.Status.Phase != buildapi.BuildPhasePending { + update = transitionToPhase(buildapi.BuildPhasePending, "", "") + } + if secret := build.Spec.Output.PushSecret; secret != nil && build.Status.Reason != buildapi.StatusReasonMissingPushSecret { + if _, err := bc.secretStore.Secrets(build.Namespace).Get(secret.Name); err != nil && errors.IsNotFound(err) { + glog.V(4).Infof("Setting reason for pending build to %q due to missing secret for %s", build.Status.Reason, buildDesc(build)) + update = transitionToPhase(buildapi.BuildPhasePending, buildapi.StatusReasonMissingPushSecret, buildapi.StatusMessageMissingPushSecret) + } + } + case kapi.PodRunning: + if build.Status.Phase != buildapi.BuildPhaseRunning { + update = transitionToPhase(buildapi.BuildPhaseRunning, "", "") + if pod.Status.StartTime != nil { + update.setStartTime(*pod.Status.StartTime) + } + } + case kapi.PodSucceeded: + if build.Status.Phase != buildapi.BuildPhaseComplete { + update = transitionToPhase(buildapi.BuildPhaseComplete, "", "") + } + if len(pod.Status.ContainerStatuses) == 0 { + // no containers in the pod means something went terribly wrong, so the build + // should be set to an error state + glog.V(2).Infof("Setting build %s to error state because its pod has no containers", buildDesc(build)) + update = transitionToPhase(buildapi.BuildPhaseError, buildapi.StatusReasonNoBuildContainerStatus, buildapi.StatusMessageNoBuildContainerStatus) + } else { + for _, info := range pod.Status.ContainerStatuses { + if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 { + glog.V(2).Infof("Setting build %s to error state because a container in its pod has non-zero exit code", buildDesc(build)) + update = transitionToPhase(buildapi.BuildPhaseError, buildapi.StatusReasonFailedContainer, buildapi.StatusMessageFailedContainer) + break + } + } + } + case kapi.PodFailed: + if build.Status.Phase != buildapi.BuildPhaseFailed { + // If a DeletionTimestamp has been set, it means that the pod will + // soon be deleted. The build should be transitioned to the Error phase. + if pod.DeletionTimestamp != nil { + update = transitionToPhase(buildapi.BuildPhaseError, buildapi.StatusReasonBuildPodDeleted, buildapi.StatusMessageBuildPodDeleted) + } else { + update = transitionToPhase(buildapi.BuildPhaseFailed, buildapi.StatusReasonGenericBuildFailed, buildapi.StatusMessageGenericBuildFailed) + } + } + } + return update, nil +} + +// handleCompletedBuild will only be called on builds that are already in a terminal phase however, their completion timestamp +// has not been set. +func (bc *BuildController) handleCompletedBuild(build *buildapi.Build, pod *kapi.Pod) (*buildUpdate, error) { + // Make sure that the completion timestamp has not already been set + if build.Status.CompletionTimestamp != nil { + return nil, nil + } + + update := &buildUpdate{} + var podStartTime *metav1.Time + if pod != nil { + podStartTime = pod.Status.StartTime + } + setBuildCompletionTimestampAndDuration(build, podStartTime, update) + + return update, nil +} + +// updateBuild is the single place where any update to a build is done in the build controller. 
+// It will check that the update is valid, perform any necessary processing such as calling HandleBuildCompletion, +// and apply the buildUpdate object as a patch. +func (bc *BuildController) updateBuild(build *buildapi.Build, update *buildUpdate, pod *kapi.Pod) error { + + stateTransition := false + // Check whether we are transitioning to a different build phase + if update.phase != nil && (*update.phase) != build.Status.Phase { + stateTransition = true + } else if build.Status.Phase == buildapi.BuildPhaseFailed && update.completionTime != nil { + // Treat a failed->failed update as a state transition when the completionTime is getting + // updated. This will cause an event to be emitted and completion processing to trigger. + // We get into this state when the pod updates the phase through the build/details subresource. + // The phase, reason, and message are set, but no event has been emitted about the failure, + // and the policy has not been given a chance to start the next build if one is waiting to + // start. + update.setPhase(buildapi.BuildPhaseFailed) + stateTransition = true + } + + if stateTransition { + // Make sure that the transition is valid + if !isValidTransition(build.Status.Phase, *update.phase) { + return fmt.Errorf("invalid phase transition %s -> %s", buildDesc(build), *update.phase) + } + + // Log that we are updating build status + reasonText := "" + if update.reason != nil && *update.reason != "" { + reasonText = fmt.Sprintf(" ( %s )", *update.reason) + } + + // Update build completion timestamp if transitioning to a terminal phase + if buildutil.IsTerminalPhase(*update.phase) { + var podStartTime *metav1.Time + if pod != nil { + podStartTime = pod.Status.StartTime + } + setBuildCompletionTimestampAndDuration(build, podStartTime, update) + } + glog.V(4).Infof("Updating build %s -> %s%s", buildDesc(build), *update.phase, reasonText) + } + + // Ensure that a pod name annotation has been set on the build if a pod is available + if update.podNameAnnotation == nil && !common.HasBuildPodNameAnnotation(build) && pod != nil { + update.setPodNameAnnotation(pod.Name) + } + + patchedBuild, err := bc.patchBuild(build, update) + if err != nil { + return err + } + + // Emit events and handle build completion if transitioned to a terminal phase + if stateTransition { + switch *update.phase { + case buildapi.BuildPhaseRunning: + bc.recorder.Eventf(patchedBuild, kapi.EventTypeNormal, buildapi.BuildStartedEventReason, fmt.Sprintf(buildapi.BuildStartedEventMessage, patchedBuild.Namespace, patchedBuild.Name)) + case buildapi.BuildPhaseCancelled: + bc.recorder.Eventf(patchedBuild, kapi.EventTypeNormal, buildapi.BuildCancelledEventReason, fmt.Sprintf(buildapi.BuildCancelledEventMessage, patchedBuild.Namespace, patchedBuild.Name)) + case buildapi.BuildPhaseComplete: + bc.recorder.Eventf(patchedBuild, kapi.EventTypeNormal, buildapi.BuildCompletedEventReason, fmt.Sprintf(buildapi.BuildCompletedEventMessage, patchedBuild.Namespace, patchedBuild.Name)) + case buildapi.BuildPhaseError, + buildapi.BuildPhaseFailed: + bc.recorder.Eventf(patchedBuild, kapi.EventTypeNormal, buildapi.BuildFailedEventReason, fmt.Sprintf(buildapi.BuildFailedEventMessage, patchedBuild.Namespace, patchedBuild.Name)) + } + if buildutil.IsTerminalPhase(*update.phase) { + common.HandleBuildCompletion(patchedBuild, bc.buildLister, bc.buildConfigGetter, bc.buildDeleter, bc.runPolicies) + } + } + return nil +} + +// patchBuild generates a patch for the given build and buildUpdate +// and applies that patch using the REST
client +func (bc *BuildController) patchBuild(build *buildapi.Build, update *buildUpdate) (*buildapi.Build, error) { + + // Create a patch using the buildUpdate object + updatedBuild, err := buildutil.BuildDeepCopy(build) + if err != nil { + return nil, fmt.Errorf("cannot create a deep copy of build %s: %v", buildDesc(build), err) + } + update.apply(updatedBuild) + patch, err := validation.CreateBuildPatch(build, updatedBuild) + if err != nil { + return nil, fmt.Errorf("failed to create a build patch: %v", err) + } + + glog.V(5).Infof("Patching build %s with %v", buildDesc(build), update) + return bc.buildPatcher.Patch(build.Namespace, build.Name, patch) +} + +// findMissingPod uses the REST client directly to determine if a pod exists or not. +// It is called when a corresponding pod for a build is not found in the cache. +func (bc *BuildController) findMissingPod(build *buildapi.Build) *kapi.Pod { + // Make one last attempt to fetch the pod using the REST client + pod, err := bc.podClient.Pods(build.Namespace).Get(buildapi.GetBuildPodName(build), metav1.GetOptions{}) + if err == nil { + glog.V(2).Infof("Found missing pod for build %s by using direct client.", buildDesc(build)) + return pod + } + return nil +} + +// getBuildByKey looks up a build by key in the buildInformer cache +func (bc *BuildController) getBuildByKey(key string) (*buildapi.Build, error) { + obj, exists, err := bc.buildInformer.GetIndexer().GetByKey(key) + if err != nil { + glog.V(2).Infof("Unable to retrieve build %q from store: %v", key, err) + return nil, err + } + if !exists { + glog.V(2).Infof("Build %q has been deleted", key) + return nil, nil + } + + return obj.(*buildapi.Build), nil +} + +// podUpdated gets called by the pod informer event handler whenever a pod +// is updated or there is a relist of pods +func (bc *BuildController) podUpdated(old, cur interface{}) { + // A periodic relist will send update events for all known pods. + curPod := cur.(*kapi.Pod) + oldPod := old.(*kapi.Pod) + // The old and new ResourceVersion will be the same in a relist of pods. + // Here we ignore pod relists because we already listen to build relists. + if curPod.ResourceVersion == oldPod.ResourceVersion { + return + } + if isBuildPod(curPod) { + bc.enqueueBuildForPod(curPod) + } +} + +// podDeleted gets called by the pod informer event handler whenever a pod +// is deleted +func (bc *BuildController) podDeleted(obj interface{}) { + pod, ok := obj.(*kapi.Pod) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone: %+v", obj)) + return + } + pod, ok = tombstone.Obj.(*kapi.Pod) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod: %+v", obj)) + return + } + } + if isBuildPod(pod) { + bc.enqueueBuildForPod(pod) + } +} + +// buildAdded is called by the build informer event handler whenever a build +// is created +func (bc *BuildController) buildAdded(obj interface{}) { + build := obj.(*buildapi.Build) + bc.enqueueBuild(build) +} + +// buildUpdated is called by the build informer event handler whenever a build +// is updated or there is a relist of builds +func (bc *BuildController) buildUpdated(old, cur interface{}) { + build := cur.(*buildapi.Build) + bc.enqueueBuild(build) +} + +// enqueueBuild adds the given build to the queue. 
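+// The key is the namespace/name string produced by kcontroller.KeyFunc, the same format used by getBuildByKey and enqueueBuildForPod.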
+func (bc *BuildController) enqueueBuild(build *buildapi.Build) { + key, err := kcontroller.KeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for build %#v: %v", build, err)) + return + } + bc.queue.Add(key) +} + +// enqueueBuildForPod adds the build corresponding to the given pod to the controller +// queue. If a build is not found for the pod, then an error is logged. +func (bc *BuildController) enqueueBuildForPod(pod *kapi.Pod) { + bc.queue.Add(fmt.Sprintf("%s/%s", pod.Namespace, buildutil.GetBuildName(pod))) +} + +// handleError is called by the main work loop to check the return of calling handleBuild. +// If an error occurred, then the key is re-added to the queue unless it has been retried too many +// times. +func (bc *BuildController) handleError(err error, key interface{}) { + if err == nil { + bc.queue.Forget(key) + return + } + + if strategy.IsFatal(err) { + glog.V(2).Infof("Will not retry fatal error for key %v: %v", key, err) + bc.queue.Forget(key) + return + } + + if bc.queue.NumRequeues(key) < maxRetries { + glog.V(4).Infof("Retrying key %v: %v", key, err) + bc.queue.AddRateLimited(key) + return + } + + glog.V(2).Infof("Giving up retrying %v: %v", key, err) + bc.queue.Forget(key) +} + +// isBuildPod returns true if the given pod is a build pod +func isBuildPod(pod *kapi.Pod) bool { + return len(buildutil.GetBuildName(pod)) > 0 +} + +// buildDesc is a utility to format the namespace/name and phase of a build +// for errors and logging +func buildDesc(build *buildapi.Build) string { + return fmt.Sprintf("%s/%s (%s)", build.Namespace, build.Name, build.Status.Phase) +} + +// transitionToPhase returns a buildUpdate object to transition a build to a new +// phase with the given reason and message +func transitionToPhase(phase buildapi.BuildPhase, reason buildapi.StatusReason, message string) *buildUpdate { + update := &buildUpdate{} + update.setPhase(phase) + update.setReason(reason) + update.setMessage(message) + return update +} + +// isValidTransition returns true if the given phase transition is valid +func isValidTransition(from, to buildapi.BuildPhase) bool { + if from == to { + return true + } + + switch { + case buildutil.IsTerminalPhase(from): + return false + case from == buildapi.BuildPhasePending: + switch to { + case buildapi.BuildPhaseNew: + return false + } + case from == buildapi.BuildPhaseRunning: + switch to { + case buildapi.BuildPhaseNew, + buildapi.BuildPhasePending: + return false + } + } + + return true +} + +// setBuildCompletionTimestampAndDuration sets the build completion time and duration as well as the start time +// if not already set on the given buildUpdate object +func setBuildCompletionTimestampAndDuration(build *buildapi.Build, podStartTime *metav1.Time, update *buildUpdate) { + now := metav1.Now() + update.setCompletionTime(now) + + startTime := build.Status.StartTimestamp + if startTime == nil { + if podStartTime != nil { + startTime = podStartTime + } else { + startTime = &now + } + update.setStartTime(*startTime) + } + update.setDuration(now.Rfc3339Copy().Time.Sub(startTime.Rfc3339Copy().Time)) +} diff --git a/pkg/build/controller/build/build_controller_test.go b/pkg/build/controller/build/build_controller_test.go new file mode 100644 index 000000000000..067bf667cd1d --- /dev/null +++ b/pkg/build/controller/build/build_controller_test.go @@ -0,0 +1,1219 @@ +package build + +import ( + "fmt" + "testing" + "time" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" 
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + kapi "k8s.io/kubernetes/pkg/api" + kexternalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + kexternalclientfake "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + kinternalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + kinternalclientfake "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + kexternalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" + kinternalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" + + builddefaults "github.com/openshift/origin/pkg/build/admission/defaults" + buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides" + buildapi "github.com/openshift/origin/pkg/build/api" + "github.com/openshift/origin/pkg/build/api/validation" + "github.com/openshift/origin/pkg/build/controller/common" + "github.com/openshift/origin/pkg/build/controller/policy" + strategy "github.com/openshift/origin/pkg/build/controller/strategy" + buildutil "github.com/openshift/origin/pkg/build/util" + "github.com/openshift/origin/pkg/client" + "github.com/openshift/origin/pkg/client/testclient" + "github.com/openshift/origin/pkg/controller/shared" + imageapi "github.com/openshift/origin/pkg/image/api" + imageinformersinternal "github.com/openshift/origin/pkg/image/generated/informers/internalversion" + imageinternalclientset "github.com/openshift/origin/pkg/image/generated/internalclientset" + imageinternalfakeclient "github.com/openshift/origin/pkg/image/generated/internalclientset/fake" +) + +// TestHandleBuild is the main test for build updates through the controller +func TestHandleBuild(t *testing.T) { + + now := metav1.Now() + before := metav1.NewTime(now.Time.Add(-1 * time.Hour)) + + build := func(phase buildapi.BuildPhase) *buildapi.Build { + b := dockerStrategy(mockBuild(phase, buildapi.BuildOutput{})) + if phase != buildapi.BuildPhaseNew { + podName := buildapi.GetBuildPodName(b) + common.SetBuildPodNameAnnotation(b, podName) + } + return b + } + pod := func(phase kapi.PodPhase) *kapi.Pod { + p := mockBuildPod(build(buildapi.BuildPhaseNew)) + p.Status.Phase = phase + switch phase { + case kapi.PodRunning, kapi.PodFailed: + p.Status.StartTime = &now + case kapi.PodSucceeded: + p.Status.StartTime = &now + p.Status.ContainerStatuses = []kapi.ContainerStatus{ + { + Name: "container", + State: kapi.ContainerState{ + Terminated: &kapi.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + }, + } + } + return p + } + cancelled := func(build *buildapi.Build) *buildapi.Build { + build.Status.Cancelled = true + return build + } + withCompletionTS := func(build *buildapi.Build) *buildapi.Build { + build.Status.CompletionTimestamp = &now + return build + } + withBuildCreationTS := func(build *buildapi.Build, tm metav1.Time) *buildapi.Build { + build.CreationTimestamp = tm + return build + } + withPodCreationTS := func(pod *kapi.Pod, tm metav1.Time) *kapi.Pod { + pod.CreationTimestamp = tm + return pod + } + + tests := []struct { + name string + + // Conditions + build *buildapi.Build + pod *kapi.Pod + runPolicy *fakeRunPolicy + errorOnPodDelete bool + errorOnPodCreate bool + errorOnBuildUpdate bool + + // Expected Result + expectUpdate 
*buildUpdate + expectPodCreated bool + expectPodDeleted bool + expectError bool + expectOnComplete bool + }{ + { + name: "cancel running build", + build: cancelled(build(buildapi.BuildPhaseRunning)), + pod: pod(kapi.PodRunning), + expectUpdate: newUpdate().phase(buildapi.BuildPhaseCancelled). + reason(buildapi.StatusReasonCancelledBuild). + message(buildapi.StatusMessageCancelledBuild). + completionTime(now). + startTime(now).update, + expectPodDeleted: true, + expectOnComplete: true, + }, + { + name: "cancel build in terminal state", + build: cancelled(withCompletionTS(build(buildapi.BuildPhaseComplete))), + pod: pod(kapi.PodRunning), + expectUpdate: nil, + }, + { + name: "cancel build with delete pod error", + build: cancelled(build(buildapi.BuildPhaseRunning)), + errorOnPodDelete: true, + expectUpdate: nil, + expectError: true, + }, + { + name: "new -> pending", + build: build(buildapi.BuildPhaseNew), + expectUpdate: newUpdate(). + phase(buildapi.BuildPhasePending). + reason(""). + message(""). + podNameAnnotation(pod(kapi.PodPending).Name). + update, + expectPodCreated: true, + }, + { + name: "new with existing newer pod", + build: withBuildCreationTS(build(buildapi.BuildPhaseNew), before), + pod: withPodCreationTS(pod(kapi.PodRunning), now), + expectUpdate: newUpdate(). + phase(buildapi.BuildPhaseRunning). + reason(""). + message(""). + startTime(now). + podNameAnnotation(pod(kapi.PodRunning).Name). + update, + }, + { + name: "new with existing older pod", + build: withBuildCreationTS(build(buildapi.BuildPhaseNew), now), + pod: withPodCreationTS(pod(kapi.PodRunning), before), + expectUpdate: newUpdate(). + phase(buildapi.BuildPhaseError). + reason(buildapi.StatusReasonBuildPodExists). + message(buildapi.StatusMessageBuildPodExists). + podNameAnnotation(pod(kapi.PodRunning).Name). + startTime(now). + completionTime(now). + update, + expectOnComplete: true, + }, + { + name: "new not runnable by policy", + build: build(buildapi.BuildPhaseNew), + runPolicy: &fakeRunPolicy{notRunnable: true}, + expectUpdate: nil, + }, + { + name: "new -> pending with update error", + build: build(buildapi.BuildPhaseNew), + errorOnBuildUpdate: true, + expectUpdate: nil, + expectPodCreated: true, + expectError: true, + }, + { + name: "pending -> running", + build: build(buildapi.BuildPhasePending), + pod: pod(kapi.PodRunning), + expectUpdate: newUpdate(). + phase(buildapi.BuildPhaseRunning). + reason(""). + message(""). + startTime(now). + update, + }, + { + name: "pending -> running with update error", + build: build(buildapi.BuildPhasePending), + pod: pod(kapi.PodRunning), + errorOnBuildUpdate: true, + expectUpdate: nil, + expectError: true, + }, + { + name: "pending -> failed", + build: build(buildapi.BuildPhasePending), + pod: pod(kapi.PodFailed), + expectOnComplete: true, + expectUpdate: newUpdate(). + phase(buildapi.BuildPhaseFailed). + reason(buildapi.StatusReasonGenericBuildFailed). + message(buildapi.StatusMessageGenericBuildFailed). + startTime(now). + completionTime(now). + update, + }, + { + name: "pending -> pending", + build: build(buildapi.BuildPhasePending), + pod: pod(kapi.PodPending), + expectUpdate: nil, + }, + { + name: "running -> complete", + build: build(buildapi.BuildPhaseRunning), + pod: pod(kapi.PodSucceeded), + expectOnComplete: true, + expectUpdate: newUpdate(). + phase(buildapi.BuildPhaseComplete). + reason(""). + message(""). + startTime(now). + completionTime(now). 
+ update, + }, + { + name: "running -> running", + build: build(buildapi.BuildPhaseRunning), + pod: pod(kapi.PodRunning), + expectUpdate: nil, + }, + { + name: "running with missing pod", + build: build(buildapi.BuildPhaseRunning), + expectOnComplete: true, + expectUpdate: newUpdate(). + phase(buildapi.BuildPhaseError). + reason(buildapi.StatusReasonBuildPodDeleted). + message(buildapi.StatusMessageBuildPodDeleted). + startTime(now). + completionTime(now). + update, + }, + { + name: "failed -> failed with no completion timestamp", + build: build(buildapi.BuildPhaseFailed), + pod: pod(kapi.PodFailed), + expectOnComplete: true, + expectUpdate: newUpdate(). + startTime(now). + completionTime(now). + update, + }, + { + name: "failed -> failed with completion timestamp", + build: withCompletionTS(build(buildapi.BuildPhaseFailed)), + pod: pod(kapi.PodFailed), + expectUpdate: nil, + }, + } + + for _, tc := range tests { + func() { + var patchedBuild *buildapi.Build + var appliedPatch string + openshiftClient := fakeOpenshiftClient(tc.build) + openshiftClient.(*testclient.Fake).PrependReactor("patch", "builds", + func(action clientgotesting.Action) (bool, runtime.Object, error) { + if tc.errorOnBuildUpdate { + return true, nil, fmt.Errorf("error") + } + var err error + patchAction := action.(clientgotesting.PatchActionImpl) + appliedPatch = string(patchAction.Patch) + patchedBuild, err = validation.ApplyBuildPatch(tc.build, patchAction.Patch) + if err != nil { + panic(fmt.Sprintf("unexpected error: %v", err)) + } + return true, patchedBuild, nil + }) + var kubeClient kinternalclientset.Interface + if tc.pod != nil { + kubeClient = fakeKubeInternalClientSet(tc.pod) + } else { + kubeClient = fakeKubeInternalClientSet() + } + podDeleted := false + podCreated := false + kubeClient.(*kinternalclientfake.Clientset).PrependReactor("delete", "pods", + func(action clientgotesting.Action) (bool, runtime.Object, error) { + if tc.errorOnPodDelete { + return true, nil, fmt.Errorf("error") + } + podDeleted = true + return true, nil, nil + }) + kubeClient.(*kinternalclientfake.Clientset).PrependReactor("create", "pods", + func(action clientgotesting.Action) (bool, runtime.Object, error) { + if tc.errorOnPodCreate { + return true, nil, fmt.Errorf("error") + } + podCreated = true + return true, nil, nil + }) + + bc := newFakeBuildController(openshiftClient, nil, nil, kubeClient) + defer bc.stop() + + runPolicy := tc.runPolicy + if runPolicy == nil { + runPolicy = &fakeRunPolicy{} + } + bc.runPolicies = []policy.RunPolicy{runPolicy} + + err := bc.handleBuild(tc.build) + if err != nil { + if !tc.expectError { + t.Errorf("%s: unexpected error: %v", tc.name, err) + } + } + if err == nil && tc.expectError { + t.Errorf("%s: expected error, got none", tc.name) + } + if tc.expectUpdate == nil && patchedBuild != nil { + t.Errorf("%s: did not expect a build update, got patch %s", tc.name, appliedPatch) + } + if tc.expectPodCreated != podCreated { + t.Errorf("%s: pod created. expected: %v, actual: %v", tc.name, tc.expectPodCreated, podCreated) + } + if tc.expectOnComplete != runPolicy.onCompleteCalled { + t.Errorf("%s: on complete called. expected: %v, actual: %v", tc.name, tc.expectOnComplete, runPolicy.onCompleteCalled) + } + if tc.expectUpdate != nil { + if patchedBuild == nil { + t.Errorf("%s: did not get an update. 
Expected: %v", tc.name, tc.expectUpdate) + return + } + expectedBuild, err := buildutil.BuildDeepCopy(tc.build) + if err != nil { + t.Fatalf("unexpected: %v", err) + } + tc.expectUpdate.apply(expectedBuild) + + // For start/completion/duration fields, simply validate that they are set/not set + if tc.expectUpdate.startTime != nil && patchedBuild.Status.StartTimestamp != nil { + expectedBuild.Status.StartTimestamp = patchedBuild.Status.StartTimestamp + } + if tc.expectUpdate.completionTime != nil && patchedBuild.Status.CompletionTimestamp != nil { + expectedBuild.Status.CompletionTimestamp = patchedBuild.Status.CompletionTimestamp + expectedBuild.Status.Duration = patchedBuild.Status.Duration + } + expectedBuild.CreationTimestamp = patchedBuild.CreationTimestamp + + if !apiequality.Semantic.DeepEqual(*expectedBuild, *patchedBuild) { + t.Errorf("%s: did not get expected %v on build. Patch: %s", tc.name, tc.expectUpdate, appliedPatch) + } + } + }() + } + +} + +// TestWork - High-level test of the work function to ensure that a build +// in the queue will be handled by updating the build status to pending +func TestWorkWithNewBuild(t *testing.T) { + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})) + var patchedBuild *buildapi.Build + openshiftClient := fakeOpenshiftClient(build) + openshiftClient.(*testclient.Fake).PrependReactor("patch", "builds", applyBuildPatchReaction(t, build, &patchedBuild)) + + bc := newFakeBuildController(openshiftClient, nil, nil, nil) + defer bc.stop() + bc.enqueueBuild(build) + + bc.work() + + if bc.queue.Len() > 0 { + t.Errorf("Expected queue to be empty") + } + if patchedBuild == nil { + t.Errorf("Expected patched build not to be nil") + } + + if patchedBuild != nil && patchedBuild.Status.Phase != buildapi.BuildPhasePending { + t.Errorf("Expected patched build status set to Pending. 
It is %s", patchedBuild.Status.Phase) + } +} + +func TestCreateBuildPod(t *testing.T) { + kubeClient := fakeKubeInternalClientSet() + bc := newFakeBuildController(nil, nil, nil, kubeClient) + defer bc.stop() + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})) + + update, err := bc.createBuildPod(build) + + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + podName := buildapi.GetBuildPodName(build) + // Validate update + expected := &buildUpdate{} + expected.setPodNameAnnotation(podName) + expected.setPhase(buildapi.BuildPhasePending) + expected.setReason("") + expected.setMessage("") + expected.setOutputRef("") + validateUpdate(t, "create build pod", expected, update) + // Make sure that a pod was created + _, err = kubeClient.Core().Pods("namespace").Get(podName, metav1.GetOptions{}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestCreateBuildPodWithImageStreamOutput(t *testing.T) { + imageStream := &imageapi.ImageStream{} + imageStream.Namespace = "isnamespace" + imageStream.Name = "isname" + imageStream.Status.DockerImageRepository = "namespace/image-name" + imageStreamRef := &kapi.ObjectReference{Name: "isname:latest", Namespace: "isnamespace", Kind: "ImageStreamTag"} + imageClient := fakeImageClient(imageStream) + bc := newFakeBuildController(nil, imageClient, nil, nil) + defer bc.stop() + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{To: imageStreamRef})) + podName := buildapi.GetBuildPodName(build) + + update, err := bc.createBuildPod(build) + + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + expected := &buildUpdate{} + expected.setPodNameAnnotation(podName) + expected.setPhase(buildapi.BuildPhasePending) + expected.setReason("") + expected.setMessage("") + expected.setOutputRef("namespace/image-name:latest") + validateUpdate(t, "create build pod with imagestream output", expected, update) +} + +func TestCreateBuildPodWithImageStreamError(t *testing.T) { + imageStreamRef := &kapi.ObjectReference{Name: "isname:latest", Namespace: "isnamespace", Kind: "ImageStreamTag"} + bc := newFakeBuildController(nil, nil, nil, nil) + defer bc.stop() + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{To: imageStreamRef})) + + update, err := bc.createBuildPod(build) + + if err == nil { + t.Errorf("Expected error") + } + expected := &buildUpdate{} + expected.setReason(buildapi.StatusReasonInvalidOutputReference) + expected.setMessage(buildapi.StatusMessageInvalidOutputRef) + validateUpdate(t, "create build pod with image stream error", expected, update) +} + +type errorStrategy struct{} + +func (*errorStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { + return nil, fmt.Errorf("error") +} + +func TestCreateBuildPodWithPodSpecCreationError(t *testing.T) { + bc := newFakeBuildController(nil, nil, nil, nil) + defer bc.stop() + bc.createStrategy = &errorStrategy{} + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})) + + update, err := bc.createBuildPod(build) + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + expected := &buildUpdate{} + expected.setReason(buildapi.StatusReasonCannotCreateBuildPodSpec) + expected.setMessage(buildapi.StatusMessageCannotCreateBuildPodSpec) + validateUpdate(t, "create build pod with pod spec creation error", expected, update) +} + +func TestCreateBuildPodWithNewerExistingPod(t *testing.T) { + now := metav1.Now() + build := 
dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})) + build.Status.StartTimestamp = &now + + existingPod := &kapi.Pod{} + existingPod.Name = buildapi.GetBuildPodName(build) + existingPod.Namespace = build.Namespace + existingPod.CreationTimestamp = metav1.NewTime(now.Time.Add(time.Hour)) + + kubeClient := fakeKubeInternalClientSet(existingPod) + errorReaction := func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: "pods"}, existingPod.Name) + } + kubeClient.(*kinternalclientfake.Clientset).PrependReactor("create", "pods", errorReaction) + bc := newFakeBuildController(nil, nil, nil, kubeClient) + bc.start() + defer bc.stop() + + update, err := bc.createBuildPod(build) + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if update != nil { + t.Errorf("unexpected update: %v", update) + } +} + +func TestCreateBuildPodWithOlderExistingPod(t *testing.T) { + now := metav1.Now() + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})) + build.CreationTimestamp = now + + existingPod := &kapi.Pod{} + existingPod.Name = buildapi.GetBuildPodName(build) + existingPod.Namespace = build.Namespace + existingPod.CreationTimestamp = metav1.NewTime(now.Time.Add(-1 * time.Hour)) + + kubeClient := fakeKubeInternalClientSet(existingPod) + errorReaction := func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: "pods"}, existingPod.Name) + } + kubeClient.(*kinternalclientfake.Clientset).PrependReactor("create", "pods", errorReaction) + bc := newFakeBuildController(nil, nil, nil, kubeClient) + defer bc.stop() + + update, err := bc.createBuildPod(build) + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + expected := &buildUpdate{} + expected.setPhase(buildapi.BuildPhaseError) + expected.setReason(buildapi.StatusReasonBuildPodExists) + expected.setMessage(buildapi.StatusMessageBuildPodExists) + validateUpdate(t, "create build pod with pod with older existing pod", expected, update) +} + +func TestCreateBuildPodWithPodCreationError(t *testing.T) { + kubeClient := fakeKubeInternalClientSet() + errorReaction := func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, fmt.Errorf("error") + } + kubeClient.(*kinternalclientfake.Clientset).PrependReactor("create", "pods", errorReaction) + bc := newFakeBuildController(nil, nil, nil, kubeClient) + defer bc.stop() + build := dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})) + + update, err := bc.createBuildPod(build) + + if err == nil { + t.Errorf("expected error") + } + + expected := &buildUpdate{} + expected.setReason(buildapi.StatusReasonCannotCreateBuildPod) + expected.setMessage(buildapi.StatusMessageCannotCreateBuildPod) + validateUpdate(t, "create build pod with pod creation error", expected, update) +} + +func TestCancelBuild(t *testing.T) { + build := mockBuild(buildapi.BuildPhaseRunning, buildapi.BuildOutput{}) + build.Name = "canceltest" + build.Namespace = "testns" + pod := &kapi.Pod{} + pod.Name = "canceltest-build" + pod.Namespace = "testns" + client := kinternalclientfake.NewSimpleClientset(pod).Core() + bc := BuildController{ + podClient: client, + } + update, err := bc.cancelBuild(build) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if _, err := client.Pods("testns").Get("canceltest-build", metav1.GetOptions{}); 
err == nil { + t.Errorf("expect pod canceltest-build to have been deleted") + } + if update.phase == nil || *update.phase != buildapi.BuildPhaseCancelled { + t.Errorf("expected phase to be set to cancelled") + } + if update.reason == nil || *update.reason != buildapi.StatusReasonCancelledBuild { + t.Errorf("expected status reason to be set to %s", buildapi.StatusReasonCancelledBuild) + } + if update.message == nil || *update.message != buildapi.StatusMessageCancelledBuild { + t.Errorf("expected status message to be set to %s", buildapi.StatusMessageCancelledBuild) + } +} + +func TestShouldIgnore(t *testing.T) { + + setCompletionTimestamp := func(build *buildapi.Build) *buildapi.Build { + now := metav1.Now() + build.Status.CompletionTimestamp = &now + return build + } + + tests := []struct { + name string + build *buildapi.Build + expectIgnore bool + }{ + { + name: "new docker build", + build: dockerStrategy(mockBuild(buildapi.BuildPhaseNew, buildapi.BuildOutput{})), + expectIgnore: false, + }, + { + name: "running docker build", + build: dockerStrategy(mockBuild(buildapi.BuildPhaseRunning, buildapi.BuildOutput{})), + expectIgnore: false, + }, + { + name: "cancelled docker build", + build: dockerStrategy(mockBuild(buildapi.BuildPhaseCancelled, buildapi.BuildOutput{})), + expectIgnore: true, + }, + { + name: "completed docker build with no completion timestamp", + build: dockerStrategy(mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{})), + expectIgnore: false, + }, + { + name: "completed docker build with completion timestamp", + build: setCompletionTimestamp(dockerStrategy(mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}))), + expectIgnore: true, + }, + { + name: "running pipeline build", + build: pipelineStrategy(mockBuild(buildapi.BuildPhaseRunning, buildapi.BuildOutput{})), + expectIgnore: true, + }, + } + + for _, test := range tests { + actual := shouldIgnore(test.build) + if expected := test.expectIgnore; actual != expected { + t.Errorf("%s: expected result: %v, actual: %v", test.name, expected, actual) + } + } + +} + +func TestIsValidTransition(t *testing.T) { + phases := []buildapi.BuildPhase{ + buildapi.BuildPhaseNew, + buildapi.BuildPhasePending, + buildapi.BuildPhaseRunning, + buildapi.BuildPhaseComplete, + buildapi.BuildPhaseFailed, + buildapi.BuildPhaseError, + buildapi.BuildPhaseCancelled, + } + for _, fromPhase := range phases { + for _, toPhase := range phases { + if buildutil.IsTerminalPhase(fromPhase) && fromPhase != toPhase { + if isValidTransition(fromPhase, toPhase) { + t.Errorf("transition %v -> %v should be invalid", fromPhase, toPhase) + } + continue + } + if fromPhase == buildapi.BuildPhasePending && toPhase == buildapi.BuildPhaseNew { + if isValidTransition(fromPhase, toPhase) { + t.Errorf("transition %v -> %v should be invalid", fromPhase, toPhase) + } + continue + } + if fromPhase == buildapi.BuildPhaseRunning && (toPhase == buildapi.BuildPhaseNew || toPhase == buildapi.BuildPhasePending) { + if isValidTransition(fromPhase, toPhase) { + t.Errorf("transition %v -> %v should be invalid", fromPhase, toPhase) + } + continue + } + + if !isValidTransition(fromPhase, toPhase) { + t.Errorf("transition %v -> %v should be valid", fromPhase, toPhase) + } + } + } +} + +func TestIsTerminal(t *testing.T) { + tests := map[buildapi.BuildPhase]bool{ + buildapi.BuildPhaseNew: false, + buildapi.BuildPhasePending: false, + buildapi.BuildPhaseRunning: false, + buildapi.BuildPhaseComplete: true, + buildapi.BuildPhaseFailed: true, + buildapi.BuildPhaseError: 
true, + buildapi.BuildPhaseCancelled: true, + } + for phase, expected := range tests { + if actual := buildutil.IsTerminalPhase(phase); actual != expected { + t.Errorf("unexpected response for %s: %v", phase, actual) + } + } +} + +func TestSetBuildCompletionTimestampAndDuration(t *testing.T) { + // set start time to 2 seconds ago to have some significant duration + startTime := metav1.NewTime(time.Now().Add(time.Second * -2)) + earlierTime := metav1.NewTime(startTime.Add(time.Hour * -1)) + + // Marker times used for validation + afterStartTimeBeforeNow := metav1.NewTime(time.Time{}) + + // Marker durations used for validation + greaterThanZeroLessThanSinceStartTime := time.Duration(0) + atLeastOneHour := time.Duration(0) + zeroDuration := time.Duration(0) + + buildWithStartTime := &buildapi.Build{} + buildWithStartTime.Status.StartTimestamp = &startTime + buildWithNoStartTime := &buildapi.Build{} + tests := []struct { + name string + build *buildapi.Build + podStartTime *metav1.Time + expected *buildUpdate + }{ + { + name: "build with start time", + build: buildWithStartTime, + podStartTime: &earlierTime, + expected: &buildUpdate{ + completionTime: &afterStartTimeBeforeNow, + duration: &greaterThanZeroLessThanSinceStartTime, + }, + }, + { + name: "build with no start time", + build: buildWithNoStartTime, + podStartTime: &earlierTime, + expected: &buildUpdate{ + startTime: &earlierTime, + completionTime: &afterStartTimeBeforeNow, + duration: &atLeastOneHour, + }, + }, + { + name: "build with no start time, no pod start time", + build: buildWithNoStartTime, + podStartTime: nil, + expected: &buildUpdate{ + startTime: &afterStartTimeBeforeNow, + completionTime: &afterStartTimeBeforeNow, + duration: &zeroDuration, + }, + }, + } + + for _, test := range tests { + update := &buildUpdate{} + setBuildCompletionTimestampAndDuration(test.build, test.podStartTime, update) + // Ensure that only the fields in the expected update are set + if test.expected.podNameAnnotation == nil && (test.expected.podNameAnnotation != update.podNameAnnotation) { + t.Errorf("%s: podNameAnnotation should not be set", test.name) + continue + } + if test.expected.phase == nil && (test.expected.phase != update.phase) { + t.Errorf("%s: phase should not be set", test.name) + continue + } + if test.expected.reason == nil && (test.expected.reason != update.reason) { + t.Errorf("%s: reason should not be set", test.name) + continue + } + if test.expected.message == nil && (test.expected.message != update.message) { + t.Errorf("%s: message should not be set", test.name) + continue + } + if test.expected.startTime == nil && (test.expected.startTime != update.startTime) { + t.Errorf("%s: startTime should not be set", test.name) + continue + } + if test.expected.completionTime == nil && (test.expected.completionTime != update.completionTime) { + t.Errorf("%s: completionTime should not be set", test.name) + continue + } + if test.expected.duration == nil && (test.expected.duration != update.duration) { + t.Errorf("%s: duration should not be set", test.name) + continue + } + if test.expected.outputRef == nil && (test.expected.outputRef != update.outputRef) { + t.Errorf("%s: outputRef should not be set", test.name) + continue + } + now := metav1.NewTime(time.Now().Add(2 * time.Second)) + if test.expected.startTime != nil { + if update.startTime == nil { + t.Errorf("%s: expected startTime to be set", test.name) + continue + } + switch test.expected.startTime { + case &afterStartTimeBeforeNow: + if 
!update.startTime.Time.After(startTime.Time) || !update.startTime.Time.Before(now.Time) { + t.Errorf("%s: startTime (%v) not within expected range (%v - %v)", test.name, update.startTime, startTime, now) + continue + } + default: + if !update.startTime.Time.Equal(test.expected.startTime.Time) { + t.Errorf("%s: startTime (%v) not equal expected time (%v)", test.name, update.startTime, test.expected.startTime) + continue + } + } + } + if test.expected.completionTime != nil { + if update.completionTime == nil { + t.Errorf("%s: expected completionTime to be set", test.name) + continue + } + switch test.expected.completionTime { + case &afterStartTimeBeforeNow: + if !update.completionTime.Time.After(startTime.Time) || !update.completionTime.Time.Before(now.Time) { + t.Errorf("%s: completionTime (%v) not within expected range (%v - %v)", test.name, update.completionTime, startTime, now) + continue + } + default: + if !update.completionTime.Time.Equal(test.expected.completionTime.Time) { + t.Errorf("%s: completionTime (%v) not equal expected time (%v)", test.name, update.completionTime, test.expected.completionTime) + continue + } + } + } + if test.expected.duration != nil { + if update.duration == nil { + t.Errorf("%s: expected duration to be set", test.name) + continue + } + switch test.expected.duration { + case &greaterThanZeroLessThanSinceStartTime: + sinceStart := now.Rfc3339Copy().Time.Sub(startTime.Rfc3339Copy().Time) + if !(*update.duration > 0) || !(*update.duration <= sinceStart) { + t.Errorf("%s: duration (%v) not within expected range (%v - %v)", test.name, update.duration, 0, sinceStart) + continue + } + case &atLeastOneHour: + if !(*update.duration >= time.Hour) { + t.Errorf("%s: duration (%v) is not at least one hour", test.name, update.duration) + continue + } + default: + if *update.duration != *test.expected.duration { + t.Errorf("%s: duration (%v) not equal expected duration (%v)", test.name, update.duration, test.expected.duration) + continue + } + } + } + } +} + +func mockBuild(phase buildapi.BuildPhase, output buildapi.BuildOutput) *buildapi.Build { + return &buildapi.Build{ + ObjectMeta: metav1.ObjectMeta{ + Name: "data-build", + Namespace: "namespace", + Annotations: map[string]string{ + buildapi.BuildConfigAnnotation: "test-bc", + }, + Labels: map[string]string{ + "name": "dataBuild", + buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel), + buildapi.BuildConfigLabel: "test-bc", + }, + }, + Spec: buildapi.BuildSpec{ + CommonSpec: buildapi.CommonSpec{ + Source: buildapi.BuildSource{ + Git: &buildapi.GitBuildSource{ + URI: "http://my.build.com/the/build/Dockerfile", + }, + ContextDir: "contextimage", + }, + Output: output, + }, + }, + Status: buildapi.BuildStatus{ + Phase: phase, + }, + } +} + +func dockerStrategy(build *buildapi.Build) *buildapi.Build { + build.Spec.Strategy = buildapi.BuildStrategy{ + DockerStrategy: &buildapi.DockerBuildStrategy{}, + } + return build +} + +func pipelineStrategy(build *buildapi.Build) *buildapi.Build { + build.Spec.Strategy = buildapi.BuildStrategy{ + JenkinsPipelineStrategy: &buildapi.JenkinsPipelineBuildStrategy{}, + } + return build +} + +func fakeOpenshiftClient(objects ...runtime.Object) client.Interface { + return testclient.NewSimpleFake(objects...) +} + +func fakeImageClient(objects ...runtime.Object) imageinternalclientset.Interface { + return imageinternalfakeclient.NewSimpleClientset(objects...) 
+} + +func fakeKubeExternalClientSet(objects ...runtime.Object) kexternalclientset.Interface { + return kexternalclientfake.NewSimpleClientset(objects...) +} + +func fakeKubeInternalClientSet(objects ...runtime.Object) kinternalclientset.Interface { + return kinternalclientfake.NewSimpleClientset(objects...) +} + +func fakeKubeInternalInformers(clientSet kinternalclientset.Interface) kinternalinformers.SharedInformerFactory { + return kinternalinformers.NewSharedInformerFactory(clientSet, 0) +} + +func fakeKubeExternalInformers(clientSet kexternalclientset.Interface) kexternalinformers.SharedInformerFactory { + return kexternalinformers.NewSharedInformerFactory(clientSet, 0) +} + +func fakeInformerFactory(openshiftClient client.Interface, kubeExternalClient kexternalclientset.Interface, kubeInternalClient kinternalclientset.Interface) shared.InformerFactory { + internalKubeInformers := fakeKubeInternalInformers(kubeInternalClient) + externalKubeInformers := fakeKubeExternalInformers(kubeExternalClient) + return shared.NewInformerFactory( + internalKubeInformers, + externalKubeInformers, + kubeInternalClient, + openshiftClient, + nil, + 0) +} + +type fakeBuildController struct { + *BuildController + informers shared.InformerFactory + imageInformers imageinformersinternal.SharedInformerFactory + stopChan chan struct{} +} + +func (c *fakeBuildController) start() { + c.imageInformers.Start(c.stopChan) + c.informers.StartCore(c.stopChan) + c.informers.Start(c.stopChan) + c.informers.KubernetesInformers().Start(c.stopChan) + c.informers.InternalKubernetesInformers().Start(c.stopChan) + if !cache.WaitForCacheSync(wait.NeverStop, c.buildStoreSynced, c.podStoreSynced, c.secretStoreSynced, c.imageStreamStoreSynced) { + panic("cannot sync cache") + } +} + +func (c *fakeBuildController) stop() { + close(c.stopChan) +} + +func newFakeBuildController(openshiftClient client.Interface, imageClient imageinternalclientset.Interface, kubeExternalClient kexternalclientset.Interface, kubeInternalClient kinternalclientset.Interface) *fakeBuildController { + if openshiftClient == nil { + openshiftClient = fakeOpenshiftClient() + } + if imageClient == nil { + imageClient = fakeImageClient() + } + if kubeExternalClient == nil { + kubeExternalClient = fakeKubeExternalClientSet() + } + if kubeInternalClient == nil { + kubeInternalClient = fakeKubeInternalClientSet() + } + + informers := fakeInformerFactory(openshiftClient, kubeExternalClient, kubeInternalClient) + imageInformers := imageinformersinternal.NewSharedInformerFactory(imageClient, 0) + stopChan := make(chan struct{}) + + params := &BuildControllerParams{ + BuildInformer: informers.Builds(), + BuildConfigInformer: informers.BuildConfigs(), + PodInformer: informers.InternalKubernetesInformers().Core().InternalVersion().Pods(), + ImageStreamInformer: imageInformers.Image().InternalVersion().ImageStreams(), + SecretInformer: informers.InternalKubernetesInformers().Core().InternalVersion().Secrets(), + KubeClientInternal: kubeInternalClient, + KubeClientExternal: kubeExternalClient, + OpenshiftClient: openshiftClient, + DockerBuildStrategy: &strategy.DockerBuildStrategy{ + Image: "test/image:latest", + Codec: kapi.Codecs.LegacyCodec(buildapi.LegacySchemeGroupVersion), + }, + SourceBuildStrategy: &strategy.SourceBuildStrategy{ + Image: "test/image:latest", + Codec: kapi.Codecs.LegacyCodec(buildapi.LegacySchemeGroupVersion), + }, + CustomBuildStrategy: &strategy.CustomBuildStrategy{ + Codec: kapi.Codecs.LegacyCodec(buildapi.LegacySchemeGroupVersion), + }, + 
BuildDefaults: builddefaults.BuildDefaults{}, + BuildOverrides: buildoverrides.BuildOverrides{}, + } + bc := &fakeBuildController{ + BuildController: NewBuildController(params), + stopChan: stopChan, + informers: informers, + imageInformers: imageInformers, + } + bc.BuildController.recorder = &record.FakeRecorder{} + bc.start() + return bc +} + +func validateUpdate(t *testing.T, name string, expected, actual *buildUpdate) { + if expected.podNameAnnotation == nil { + if actual.podNameAnnotation != nil { + t.Errorf("%s: podNameAnnotation should be nil. Actual: %s", name, *actual.podNameAnnotation) + } + } else { + if actual.podNameAnnotation == nil { + t.Errorf("%s: podNameAnnotation should not be nil.", name) + } else { + if *expected.podNameAnnotation != *actual.podNameAnnotation { + t.Errorf("%s: unexpected value for podNameAnnotation. Expected: %s. Actual: %s", name, *expected.podNameAnnotation, *actual.podNameAnnotation) + } + } + } + if expected.phase == nil { + if actual.phase != nil { + t.Errorf("%s: phase should be nil. Actual: %s", name, *actual.phase) + } + } else { + if actual.phase == nil { + t.Errorf("%s: phase should not be nil.", name) + } else { + if *expected.phase != *actual.phase { + t.Errorf("%s: unexpected value for phase. Expected: %s. Actual: %s", name, *expected.phase, *actual.phase) + } + } + } + if expected.reason == nil { + if actual.reason != nil { + t.Errorf("%s: reason should be nil. Actual: %s", name, *actual.reason) + } + } else { + if actual.reason == nil { + t.Errorf("%s: reason should not be nil.", name) + } else { + if *expected.reason != *actual.reason { + t.Errorf("%s: unexpected value for reason. Expected: %s. Actual: %s", name, *expected.reason, *actual.reason) + } + } + } + if expected.message == nil { + if actual.message != nil { + t.Errorf("%s: message should be nil. Actual: %s", name, *actual.message) + } + } else { + if actual.message == nil { + t.Errorf("%s: message should not be nil.", name) + } else { + if *expected.message != *actual.message { + t.Errorf("%s: unexpected value for message. Expected: %s. Actual: %s", name, *expected.message, *actual.message) + } + } + } + if expected.startTime == nil { + if actual.startTime != nil { + t.Errorf("%s: startTime should be nil. Actual: %s", name, *actual.startTime) + } + } else { + if actual.startTime == nil { + t.Errorf("%s: startTime should not be nil.", name) + } else { + if !(*expected.startTime).Equal(*actual.startTime) { + t.Errorf("%s: unexpected value for startTime. Expected: %s. Actual: %s", name, *expected.startTime, *actual.startTime) + } + } + } + if expected.completionTime == nil { + if actual.completionTime != nil { + t.Errorf("%s: completionTime should be nil. Actual: %s", name, *actual.completionTime) + } + } else { + if actual.completionTime == nil { + t.Errorf("%s: completionTime should not be nil.", name) + } else { + if !(*expected.completionTime).Equal(*actual.completionTime) { + t.Errorf("%s: unexpected value for completionTime. Expected: %v. Actual: %v", name, *expected.completionTime, *actual.completionTime) + } + } + } + if expected.duration == nil { + if actual.duration != nil { + t.Errorf("%s: duration should be nil. Actual: %s", name, *actual.duration) + } + } else { + if actual.duration == nil { + t.Errorf("%s: duration should not be nil.", name) + } else { + if *expected.duration != *actual.duration { + t.Errorf("%s: unexpected value for duration. Expected: %v. 
Actual: %v", name, *expected.duration, *actual.duration) + } + } + } + if expected.outputRef == nil { + if actual.outputRef != nil { + t.Errorf("%s: outputRef should be nil. Actual: %s", name, *actual.outputRef) + } + } else { + if actual.outputRef == nil { + t.Errorf("%s: outputRef should not be nil.", name) + } else { + if *expected.outputRef != *actual.outputRef { + t.Errorf("%s: unexpected value for outputRef. Expected: %s. Actual: %s", name, *expected.outputRef, *actual.outputRef) + } + } + } +} + +func applyBuildPatchReaction(t *testing.T, build *buildapi.Build, buildPtr **buildapi.Build) func(action clientgotesting.Action) (bool, runtime.Object, error) { + return func(action clientgotesting.Action) (bool, runtime.Object, error) { + patchAction := action.(clientgotesting.PatchActionImpl) + var err error + (*buildPtr), err = validation.ApplyBuildPatch(build, patchAction.Patch) + if err != nil { + t.Fatalf("unexpected error: %v", err) + return true, nil, nil + } + return true, *buildPtr, nil + } +} + +type updateBuilder struct { + update *buildUpdate +} + +func newUpdate() *updateBuilder { + return &updateBuilder{update: &buildUpdate{}} +} + +func (b *updateBuilder) phase(phase buildapi.BuildPhase) *updateBuilder { + b.update.setPhase(phase) + return b +} + +func (b *updateBuilder) reason(reason buildapi.StatusReason) *updateBuilder { + b.update.setReason(reason) + return b +} + +func (b *updateBuilder) message(message string) *updateBuilder { + b.update.setMessage(message) + return b +} + +func (b *updateBuilder) startTime(startTime metav1.Time) *updateBuilder { + b.update.setStartTime(startTime) + return b +} + +func (b *updateBuilder) completionTime(completionTime metav1.Time) *updateBuilder { + b.update.setCompletionTime(completionTime) + return b +} + +func (b *updateBuilder) duration(duration time.Duration) *updateBuilder { + b.update.setDuration(duration) + return b +} + +func (b *updateBuilder) outputRef(ref string) *updateBuilder { + b.update.setOutputRef(ref) + return b +} + +func (b *updateBuilder) podNameAnnotation(podName string) *updateBuilder { + b.update.setPodNameAnnotation(podName) + return b +} + +type fakeRunPolicy struct { + notRunnable bool + onCompleteCalled bool +} + +func (f *fakeRunPolicy) IsRunnable(*buildapi.Build) (bool, error) { + return !f.notRunnable, nil +} + +func (f *fakeRunPolicy) OnComplete(*buildapi.Build) error { + f.onCompleteCalled = true + return nil +} + +func (f *fakeRunPolicy) Handles(buildapi.BuildRunPolicy) bool { + return true +} + +func mockBuildPod(build *buildapi.Build) *kapi.Pod { + pod := &kapi.Pod{} + pod.Name = buildapi.GetBuildPodName(build) + pod.Namespace = build.Namespace + pod.Annotations = map[string]string{} + pod.Annotations[buildapi.BuildAnnotation] = build.Name + return pod +} diff --git a/pkg/build/controller/build/buildupdate.go b/pkg/build/controller/build/buildupdate.go new file mode 100644 index 000000000000..d2cabe5ea38a --- /dev/null +++ b/pkg/build/controller/build/buildupdate.go @@ -0,0 +1,141 @@ +package build + +import ( + "fmt" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildapi "github.com/openshift/origin/pkg/build/api" + "github.com/openshift/origin/pkg/build/controller/common" +) + +// buildUpdate holds a set of updates to be made to a build object. +// Only the fields defined in this struct will be updated/patched by this controller. +// The reason this exists is that there isn't separation at the API +// level between build spec and build status. 
Once that happens, the +// controller should only be able to update the status, while end users +// should be able to update the spec. +type buildUpdate struct { + podNameAnnotation *string + phase *buildapi.BuildPhase + reason *buildapi.StatusReason + message *string + startTime *metav1.Time + completionTime *metav1.Time + duration *time.Duration + outputRef *string +} + +func (u *buildUpdate) setPhase(phase buildapi.BuildPhase) { + u.phase = &phase +} + +func (u *buildUpdate) setReason(reason buildapi.StatusReason) { + u.reason = &reason +} + +func (u *buildUpdate) setMessage(message string) { + u.message = &message +} + +func (u *buildUpdate) setStartTime(startTime metav1.Time) { + u.startTime = &startTime +} + +func (u *buildUpdate) setCompletionTime(completionTime metav1.Time) { + u.completionTime = &completionTime +} + +func (u *buildUpdate) setDuration(duration time.Duration) { + u.duration = &duration +} + +func (u *buildUpdate) setOutputRef(ref string) { + u.outputRef = &ref +} + +func (u *buildUpdate) setPodNameAnnotation(podName string) { + u.podNameAnnotation = &podName +} + +func (u *buildUpdate) reset() { + u.podNameAnnotation = nil + u.phase = nil + u.reason = nil + u.message = nil + u.startTime = nil + u.completionTime = nil + u.duration = nil + u.outputRef = nil +} + +func (u *buildUpdate) isEmpty() bool { + return u.podNameAnnotation == nil && + u.phase == nil && + u.reason == nil && + u.message == nil && + u.startTime == nil && + u.completionTime == nil && + u.duration == nil && + u.outputRef == nil +} + +func (u *buildUpdate) apply(build *buildapi.Build) { + if u.phase != nil { + build.Status.Phase = *u.phase + } + if u.reason != nil { + build.Status.Reason = *u.reason + } + if u.message != nil { + build.Status.Message = *u.message + } + if u.startTime != nil { + build.Status.StartTimestamp = u.startTime + } + if u.completionTime != nil { + build.Status.CompletionTimestamp = u.completionTime + } + if u.duration != nil { + build.Status.Duration = *u.duration + } + if u.podNameAnnotation != nil { + common.SetBuildPodNameAnnotation(build, *u.podNameAnnotation) + } + if u.outputRef != nil { + build.Status.OutputDockerImageReference = *u.outputRef + } +} + +// String returns a string representation of this update +// Used with %v in string formatting +func (u *buildUpdate) String() string { + updates := []string{} + if u.phase != nil { + updates = append(updates, fmt.Sprintf("phase: %q", *u.phase)) + } + if u.reason != nil { + updates = append(updates, fmt.Sprintf("reason: %q", *u.reason)) + } + if u.message != nil { + updates = append(updates, fmt.Sprintf("message: %q", *u.message)) + } + if u.startTime != nil { + updates = append(updates, fmt.Sprintf("startTime: %q", u.startTime.String())) + } + if u.completionTime != nil { + updates = append(updates, fmt.Sprintf("completionTime: %q", u.completionTime.String())) + } + if u.duration != nil { + updates = append(updates, fmt.Sprintf("duration: %q", u.duration.String())) + } + if u.outputRef != nil { + updates = append(updates, fmt.Sprintf("outputRef: %q", *u.outputRef)) + } + if u.podNameAnnotation != nil { + updates = append(updates, fmt.Sprintf("podName: %q", *u.podNameAnnotation)) + } + return fmt.Sprintf("buildUpdate(%s)", strings.Join(updates, ", ")) +} diff --git a/pkg/build/controller/build/buildupdate_test.go b/pkg/build/controller/build/buildupdate_test.go new file mode 100644 index 000000000000..0a645f5ed7c0 --- /dev/null +++ b/pkg/build/controller/build/buildupdate_test.go @@ -0,0 +1,122 @@ +package build + +import 
( + "fmt" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildapi "github.com/openshift/origin/pkg/build/api" +) + +func TestBuildUpdateSetters(t *testing.T) { + now := metav1.Now() + tests := []struct { + f func(*buildUpdate) + validateApply func(*buildapi.Build) bool + expected string + }{ + { + f: func(u *buildUpdate) { + u.setPhase(buildapi.BuildPhaseCancelled) + }, + validateApply: func(b *buildapi.Build) bool { + return b.Status.Phase == buildapi.BuildPhaseCancelled + }, + expected: "buildUpdate(phase: \"Cancelled\")", + }, + { + f: func(u *buildUpdate) { + u.setReason(buildapi.StatusReasonCannotCreateBuildPodSpec) + }, + validateApply: func(b *buildapi.Build) bool { + return b.Status.Reason == buildapi.StatusReasonCannotCreateBuildPodSpec + }, + expected: "buildUpdate(reason: \"CannotCreateBuildPodSpec\")", + }, + { + f: func(u *buildUpdate) { + u.setMessage("hello") + }, + validateApply: func(b *buildapi.Build) bool { + return b.Status.Message == "hello" + }, + expected: "buildUpdate(message: \"hello\")", + }, + { + f: func(u *buildUpdate) { + u.setStartTime(now) + }, + validateApply: func(b *buildapi.Build) bool { + return (*b.Status.StartTimestamp) == now + }, + expected: fmt.Sprintf("buildUpdate(startTime: \"%v\")", now), + }, + { + f: func(u *buildUpdate) { + u.setCompletionTime(now) + }, + validateApply: func(b *buildapi.Build) bool { + return (*b.Status.CompletionTimestamp) == now + }, + expected: fmt.Sprintf("buildUpdate(completionTime: \"%v\")", now), + }, + { + f: func(u *buildUpdate) { + d := time.Duration(2 * time.Hour) + u.setDuration(d) + }, + validateApply: func(b *buildapi.Build) bool { + return b.Status.Duration == time.Duration(2*time.Hour) + }, + expected: fmt.Sprintf("buildUpdate(duration: \"%v\")", time.Duration(2*time.Hour)), + }, + { + f: func(u *buildUpdate) { + u.setOutputRef("1234567890") + }, + validateApply: func(b *buildapi.Build) bool { + return b.Status.OutputDockerImageReference == "1234567890" + }, + expected: fmt.Sprintf("buildUpdate(outputRef: %q)", "1234567890"), + }, + { + f: func(u *buildUpdate) { + u.setPodNameAnnotation("test-pod-name") + }, + validateApply: func(b *buildapi.Build) bool { + return b.Annotations != nil && b.Annotations[buildapi.BuildPodNameAnnotation] == "test-pod-name" + }, + expected: "buildUpdate(podName: \"test-pod-name\")", + }, + } + + for _, test := range tests { + buildUpdate := &buildUpdate{} + test.f(buildUpdate) + if actual := buildUpdate.String(); actual != test.expected { + t.Errorf("Unexpected string: %s, expected: %s", actual, test.expected) + } + b := &buildapi.Build{} + buildUpdate.apply(b) + if !test.validateApply(b) { + t.Errorf("Failed to apply update %v to build %#v", buildUpdate, b) + } + } +} + +func TestBuildUpdateIsEmpty(t *testing.T) { + update := &buildUpdate{} + if !update.isEmpty() { + t.Errorf("isEmpty returned false, expecting true") + } + update.setOutputRef("123456789") + if update.isEmpty() { + t.Errorf("isEmpty returned true, expecting false") + } + update.reset() + if !update.isEmpty() { + t.Errorf("isEmpty returned false, expecting true") + } +} diff --git a/pkg/build/controller/build/doc.go b/pkg/build/controller/build/doc.go new file mode 100644 index 000000000000..1141029dad3e --- /dev/null +++ b/pkg/build/controller/build/doc.go @@ -0,0 +1,70 @@ +// The BuildController is responsible for implementing a build's lifecycle, +// which can be represented by a simple state machine: +// +// Active States: +// +-------+ +-------------+ +-------------+ +// | New | -> | 
Pending (p) | -> | Running (p) | +// +-------+ +-------------+ +-------------+ +// +// Terminal States: +// +---------------+ +------------+ +-----------+ +-------+ +// | Succeeded (p) | | Failed (p) | | Cancelled | | Error | +// +---------------+ +------------+ +-----------+ +-------+ +// +// (p) denotes that a pod is associated with the build at that state +// +// A transition is valid from any active state to any terminal state. +// Transitions that are not valid: +// Pending -> New +// Running -> New +// Running -> Pending +// Any terminal state -> Any other state +// +// Following is a brief description of each state: +// +// Active States: +// +// New - this is the initial state for a build. It will not have a pod +// associated with it. When the controller sees a build in this state, it +// needs to determine whether it can create a pod for it and move it to +// Pending. A build can only transition from New to Pending if the following +// conditions are met: +// - If the build output is set to an ImageStreamTag, the ImageStream must +// exist and must have a valid Docker reference. +// - The policy associated with the build (Serial, Parallel, SerialLatestOnly) +// must allow the build to be created. For example, if there is another build +// from the same BuildConfig already running and the policy is Serial, +// the current build must remain in the New state. +// +// Pending - a build is set in this state when a build pod has been created. +// If the pod is in either the New or Pending state, the build will remain in the +// Pending state. The pod could remain in the Pending state for a long time if the push secret +// it needs to mount is not present. The build controller will check if the +// push secret exists, and if not, it will update the build reason and message +// with that information. +// +// Running - a build is updated to this state if the corresponding pod is in the +// Running state. +// +// Terminal States: +// Once a build enters a terminal state, it must notify its corresponding policy +// that it has completed. That way, the next build can be moved out of the New +// state if, for example, the BuildConfig uses the Serial policy. In general, the +// build controller updates the build's state based on the pod state. The one +// exception is the Failed state, which can be set by the build pod itself when it +// updates the build/details subresource. This is done so that the build pod can +// set the reason/message for the build failure. Because the build controller +// can also update the reason/message while processing a build, the build storage +// prevents updates to the reason/message after the build's phase has been changed +// to Failed. +// +// Succeeded/Failed - reflect the final state of the build pod. The build's +// Status.Phase field can be set to Failed by the build pod itself. +// +// Cancelled - is set when the build is cancelled from one of the active states. +// +// Error - is set when the build pod is deleted while the build is running or +// the build pod is in an invalid state when the build completes (for example, it +// has no containers). 
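The transition rules spelled out above can be captured in a few lines of Go. The sketch below is illustrative only; it is not part of this patch and is not necessarily how the controller's isValidTransition is implemented (the function name is hypothetical). It assumes only the buildapi phase constants and the buildutil.IsTerminalPhase helper that the accompanying tests already use.

package build

import (
	buildapi "github.com/openshift/origin/pkg/build/api"
	buildutil "github.com/openshift/origin/pkg/build/util"
)

// validTransitionSketch (hypothetical name) encodes the documented rules:
// a phase may stay where it is, terminal phases never move to a different
// phase, Pending may not fall back to New, Running may not fall back to New
// or Pending, and every remaining transition is allowed.
func validTransitionSketch(from, to buildapi.BuildPhase) bool {
	if from == to {
		return true
	}
	switch {
	case buildutil.IsTerminalPhase(from):
		// Any terminal state -> any other state is invalid.
		return false
	case from == buildapi.BuildPhasePending && to == buildapi.BuildPhaseNew:
		return false
	case from == buildapi.BuildPhaseRunning && (to == buildapi.BuildPhaseNew || to == buildapi.BuildPhasePending):
		return false
	default:
		return true
	}
}

For example, Running -> Failed is allowed while Complete -> Running is not, which matches the expectations encoded in TestIsValidTransition earlier in this patch.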
+ +package build diff --git a/pkg/build/controller/build/podcreationstrategy.go b/pkg/build/controller/build/podcreationstrategy.go new file mode 100644 index 000000000000..d506d3ced2df --- /dev/null +++ b/pkg/build/controller/build/podcreationstrategy.go @@ -0,0 +1,46 @@ +package build + +import ( + "fmt" + + kapi "k8s.io/kubernetes/pkg/api" + + buildapi "github.com/openshift/origin/pkg/build/api" +) + +// buildPodCreationStrategy is used by the build controller to +// create a build pod based on a build strategy +type buildPodCreationStrategy interface { + CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) +} + +type typeBasedFactoryStrategy struct { + dockerBuildStrategy buildPodCreationStrategy + sourceBuildStrategy buildPodCreationStrategy + customBuildStrategy buildPodCreationStrategy +} + +func (f *typeBasedFactoryStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { + var pod *kapi.Pod + var err error + switch { + case build.Spec.Strategy.DockerStrategy != nil: + pod, err = f.dockerBuildStrategy.CreateBuildPod(build) + case build.Spec.Strategy.SourceStrategy != nil: + pod, err = f.sourceBuildStrategy.CreateBuildPod(build) + case build.Spec.Strategy.CustomStrategy != nil: + pod, err = f.customBuildStrategy.CreateBuildPod(build) + case build.Spec.Strategy.JenkinsPipelineStrategy != nil: + return nil, fmt.Errorf("creating a build pod for Build %s/%s with the JenkinsPipeline strategy is not supported", build.Namespace, build.Name) + default: + return nil, fmt.Errorf("no supported build strategy defined for Build %s/%s", build.Namespace, build.Name) + } + + if pod != nil { + if pod.Annotations == nil { + pod.Annotations = map[string]string{} + } + pod.Annotations[buildapi.BuildAnnotation] = build.Name + } + return pod, err +} diff --git a/pkg/build/controller/build/podcreationstrategy_test.go b/pkg/build/controller/build/podcreationstrategy_test.go new file mode 100644 index 000000000000..baf5f6010f8d --- /dev/null +++ b/pkg/build/controller/build/podcreationstrategy_test.go @@ -0,0 +1,113 @@ +package build + +import ( + "fmt" + "testing" + + kapi "k8s.io/kubernetes/pkg/api" + + buildapi "github.com/openshift/origin/pkg/build/api" +) + +type testPodCreationStrategy struct { + pod *kapi.Pod + err error +} + +func (s *testPodCreationStrategy) CreateBuildPod(b *buildapi.Build) (*kapi.Pod, error) { + return s.pod, s.err +} + +func TestStrategyCreateBuildPod(t *testing.T) { + dockerBuildPod := &kapi.Pod{} + sourceBuildPod := &kapi.Pod{} + customBuildPod := &kapi.Pod{} + + dockerBuild := &buildapi.Build{} + dockerBuild.Spec.Strategy.DockerStrategy = &buildapi.DockerBuildStrategy{} + + sourceBuild := &buildapi.Build{} + sourceBuild.Spec.Strategy.SourceStrategy = &buildapi.SourceBuildStrategy{} + + customBuild := &buildapi.Build{} + customBuild.Spec.Strategy.CustomStrategy = &buildapi.CustomBuildStrategy{} + + pipelineBuild := &buildapi.Build{} + pipelineBuild.Spec.Strategy.JenkinsPipelineStrategy = &buildapi.JenkinsPipelineBuildStrategy{} + + strategy := &typeBasedFactoryStrategy{ + dockerBuildStrategy: &testPodCreationStrategy{pod: dockerBuildPod}, + sourceBuildStrategy: &testPodCreationStrategy{pod: sourceBuildPod}, + customBuildStrategy: &testPodCreationStrategy{pod: customBuildPod}, + } + strategyErr := fmt.Errorf("error") + errorStrategy := &typeBasedFactoryStrategy{ + dockerBuildStrategy: &testPodCreationStrategy{err: strategyErr}, + sourceBuildStrategy: &testPodCreationStrategy{err: strategyErr}, + customBuildStrategy: &testPodCreationStrategy{err: strategyErr}, + } 
+ + tests := []struct { + strategy buildPodCreationStrategy + build *buildapi.Build + expectedPod *kapi.Pod + expectError bool + }{ + { + strategy: strategy, + build: dockerBuild, + expectedPod: dockerBuildPod, + }, + { + strategy: strategy, + build: sourceBuild, + expectedPod: sourceBuildPod, + }, + { + strategy: strategy, + build: customBuild, + expectedPod: customBuildPod, + }, + { + strategy: strategy, + build: pipelineBuild, + expectError: true, + }, + { + strategy: strategy, + build: &buildapi.Build{}, + expectError: true, + }, + { + strategy: errorStrategy, + build: dockerBuild, + expectError: true, + }, + { + strategy: errorStrategy, + build: sourceBuild, + expectError: true, + }, + { + strategy: errorStrategy, + build: customBuild, + expectError: true, + }, + } + + for _, test := range tests { + pod, err := test.strategy.CreateBuildPod(test.build) + if test.expectError { + if err == nil { + t.Errorf("Expected error but did not get one") + } + continue + } + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if pod != test.expectedPod { + t.Errorf("did not get expected pod with build %#v", test.build) + } + } +} diff --git a/pkg/build/controller/build_controller.go b/pkg/build/controller/build_controller.go deleted file mode 100644 index 8dd09ce012dd..000000000000 --- a/pkg/build/controller/build_controller.go +++ /dev/null @@ -1,330 +0,0 @@ -package controller - -import ( - "fmt" - - "github.com/golang/glog" - - errors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/record" - kapi "k8s.io/kubernetes/pkg/api" - - builddefaults "github.com/openshift/origin/pkg/build/admission/defaults" - buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides" - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" - "github.com/openshift/origin/pkg/build/controller/common" - "github.com/openshift/origin/pkg/build/controller/policy" - strategy "github.com/openshift/origin/pkg/build/controller/strategy" - buildutil "github.com/openshift/origin/pkg/build/util" - imageapi "github.com/openshift/origin/pkg/image/api" -) - -// BuildController watches build resources and manages their state -type BuildController struct { - BuildUpdater buildclient.BuildUpdater - BuildLister buildclient.BuildLister - BuildConfigGetter buildclient.BuildConfigGetter - BuildDeleter buildclient.BuildDeleter - PodManager podManager - BuildStrategy BuildStrategy - ImageStreamClient imageStreamClient - Recorder record.EventRecorder - RunPolicies []policy.RunPolicy - BuildDefaults builddefaults.BuildDefaults - BuildOverrides buildoverrides.BuildOverrides -} - -// BuildStrategy knows how to create a pod spec for a pod which can execute a build. -type BuildStrategy interface { - CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) -} - -type podManager interface { - CreatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) - DeletePod(namespace string, pod *kapi.Pod) error - GetPod(namespace, name string) (*kapi.Pod, error) -} - -type imageStreamClient interface { - GetImageStream(namespace, name string) (*imageapi.ImageStream, error) -} - -// CancelBuild updates a build status to Cancelled, after its associated pod is deleted. 
-func (bc *BuildController) CancelBuild(build *buildapi.Build) error { - if !isBuildCancellable(build) { - glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase) - return nil - } - - glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name) - - pod, err := bc.PodManager.GetPod(build.Namespace, buildapi.GetBuildPodName(build)) - if err != nil { - if !errors.IsNotFound(err) { - return fmt.Errorf("Failed to get pod for build %s/%s: %v", build.Namespace, build.Name, err) - } - } else { - err := bc.PodManager.DeletePod(build.Namespace, pod) - if err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("Couldn't delete build pod %s/%s: %v", build.Namespace, pod.Name, err) - } - } - - build.Status.Phase = buildapi.BuildPhaseCancelled - now := metav1.Now() - common.SetBuildCompletionTimeAndDuration(build, &now) - bc.Recorder.Eventf(build, kapi.EventTypeNormal, buildapi.BuildCancelledEventReason, fmt.Sprintf(buildapi.BuildCancelledEventMessage, build.Namespace, build.Name)) - // set the status details for the cancelled build before updating the build - // object. - build.Status.Reason = buildapi.StatusReasonCancelledBuild - build.Status.Message = buildapi.StatusMessageCancelledBuild - if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil { - return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err) - } - - glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name) - - common.HandleBuildCompletion(build, bc.BuildLister, bc.BuildConfigGetter, bc.BuildDeleter, bc.RunPolicies) - - return nil -} - -// HandleBuild deletes pods for cancelled builds and takes new builds and puts -// them in the pending state after creating a corresponding pod -func (bc *BuildController) HandleBuild(build *buildapi.Build) error { - // these builds are processed/updated/etc by the jenkins sync plugin - if build.Spec.Strategy.JenkinsPipelineStrategy != nil { - glog.V(4).Infof("Ignoring build with jenkins pipeline strategy") - return nil - } - glog.V(4).Infof("Handling build %s/%s (%s)", build.Namespace, build.Name, build.Status.Phase) - - // A cancelling event was triggered for the build, delete its pod and update build status. - if build.Status.Cancelled && build.Status.Phase != buildapi.BuildPhaseCancelled { - glog.V(5).Infof("Marking build %s/%s as cancelled", build.Namespace, build.Name) - if err := bc.CancelBuild(build); err != nil { - build.Status.Reason = buildapi.StatusReasonCancelBuildFailed - build.Status.Message = buildapi.StatusMessageCancelBuildFailed - if err = bc.BuildUpdater.Update(build.Namespace, build); err != nil { - utilruntime.HandleError(fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err)) - } - return fmt.Errorf("Failed to cancel build %s/%s: %v, will retry", build.Namespace, build.Name, err) - } - } - - // Handle only new builds from this point - if build.Status.Phase != buildapi.BuildPhaseNew { - return nil - } - - runPolicy := policy.ForBuild(build, bc.RunPolicies) - if runPolicy == nil { - return fmt.Errorf("unable to determine build scheduler for %s/%s", build.Namespace, build.Name) - } - - // The runPolicy decides whether to execute this build or not. 
- if run, err := runPolicy.IsRunnable(build); err != nil || !run { - return err - } - - if err := bc.nextBuildPhase(build); err != nil { - return err - } - - if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil { - // This is not a retryable error because the build has been created. The worst case - // outcome of not updating the buildconfig is that we might rerun a build for the - // same "new" imageid change in the future, which is better than guaranteeing we - // run the build 2+ times by retrying it here. - glog.V(2).Infof("Failed to record changes to build %s/%s: %v", build.Namespace, build.Name, err) - } - return nil -} - -// nextBuildPhase updates build with any appropriate changes, or returns an error if -// the change cannot occur. When returning nil, be sure to set build.Status and optionally -// build.Message. -func (bc *BuildController) nextBuildPhase(build *buildapi.Build) error { - // If a cancelling event was triggered for the build, update build status. - if build.Status.Cancelled { - glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name) - build.Status.Phase = buildapi.BuildPhaseCancelled - return nil - } - - // Set the output Docker image reference. - ref, err := bc.resolveOutputDockerImageReference(build) - if err != nil { - build.Status.Reason = buildapi.StatusReasonInvalidOutputReference - build.Status.Message = buildapi.StatusMessageInvalidOutputRef - return err - } - build.Status.OutputDockerImageReference = ref - - // Make a copy to avoid mutating the build from this point on. - copy, err := kapi.Scheme.Copy(build) - if err != nil { - return fmt.Errorf("unable to copy build: %v", err) - } - buildCopy := copy.(*buildapi.Build) - - // TODO(rhcarvalho) - // The S2I and Docker builders expect build.Spec.Output.To to contain a - // resolved reference to a Docker image. Since build.Spec is immutable, we - // change a copy (that is never persisted) and pass it to - // bc.BuildStrategy.CreateBuildPod. We should make the builders use - // build.Status.OutputDockerImageReference, what will make copying the build - // unnecessary. - if build.Spec.Output.To != nil && len(build.Spec.Output.To.Name) != 0 { - buildCopy.Spec.Output.To = &kapi.ObjectReference{ - Kind: "DockerImage", - Name: ref, - } - } - - // Invoke the strategy to get a build pod. 
- podSpec, err := bc.BuildStrategy.CreateBuildPod(buildCopy) - if err != nil { - build.Status.Reason = buildapi.StatusReasonCannotCreateBuildPodSpec - build.Status.Message = buildapi.StatusMessageCannotCreateBuildPodSpec - if strategy.IsFatal(err) { - return &strategy.FatalError{Reason: fmt.Sprintf("failed to create a build pod spec for build %s/%s: %v", build.Namespace, build.Name, err)} - } - return fmt.Errorf("failed to create a build pod spec for build %s/%s: %v", build.Namespace, build.Name, err) - } - if err := bc.BuildDefaults.ApplyDefaults(podSpec); err != nil { - return fmt.Errorf("failed to apply build defaults for build %s/%s: %v", build.Namespace, build.Name, err) - } - if err := bc.BuildOverrides.ApplyOverrides(podSpec); err != nil { - return fmt.Errorf("failed to apply build overrides for build %s/%s: %v", build.Namespace, build.Name, err) - } - - glog.V(4).Infof("Pod %s for build %s/%s is about to be created", podSpec.Name, build.Namespace, build.Name) - - if _, err := bc.PodManager.CreatePod(build.Namespace, podSpec); err != nil { - if errors.IsAlreadyExists(err) { - bc.Recorder.Eventf(build, kapi.EventTypeWarning, "FailedCreate", "Pod already exists: %s/%s", podSpec.Namespace, podSpec.Name) - glog.V(4).Infof("Build pod already existed: %#v", podSpec) - - // If the existing pod was created before this build, switch to Error state. - existingPod, err := bc.PodManager.GetPod(podSpec.Namespace, podSpec.Name) - if err == nil && existingPod.CreationTimestamp.Before(build.CreationTimestamp) { - build.Status.Phase = buildapi.BuildPhaseError - build.Status.Reason = buildapi.StatusReasonBuildPodExists - build.Status.Message = buildapi.StatusMessageBuildPodExists - } - return nil - } - // Log an event if the pod is not created (most likely due to quota denial). - bc.Recorder.Eventf(build, kapi.EventTypeWarning, "FailedCreate", "Error creating: %v", err) - build.Status.Reason = buildapi.StatusReasonCannotCreateBuildPod - build.Status.Message = buildapi.StatusMessageCannotCreateBuildPod - return fmt.Errorf("failed to create build pod: %v", err) - } - common.SetBuildPodNameAnnotation(build, podSpec.Name) - glog.V(4).Infof("Created pod for build: %#v", podSpec) - - // Set the build phase, which will be persisted. - build.Status.Phase = buildapi.BuildPhasePending - build.Status.Reason = "" - build.Status.Message = "" - return nil -} - -// resolveOutputDockerImageReference returns a reference to a Docker image -// computed from the buid.Spec.Output.To reference. 
-func (bc *BuildController) resolveOutputDockerImageReference(build *buildapi.Build) (string, error) { - outputTo := build.Spec.Output.To - if outputTo == nil || outputTo.Name == "" { - return "", nil - } - var ref string - switch outputTo.Kind { - case "DockerImage": - ref = outputTo.Name - case "ImageStream", "ImageStreamTag": - // TODO(smarterclayton): security, ensure that the reference image stream is actually visible - namespace := outputTo.Namespace - if len(namespace) == 0 { - namespace = build.Namespace - } - - var tag string - streamName := outputTo.Name - if outputTo.Kind == "ImageStreamTag" { - var ok bool - streamName, tag, ok = imageapi.SplitImageStreamTag(streamName) - if !ok { - return "", fmt.Errorf("the referenced image stream tag is invalid: %s", outputTo.Name) - } - tag = ":" + tag - } - stream, err := bc.ImageStreamClient.GetImageStream(namespace, streamName) - if err != nil { - if errors.IsNotFound(err) { - return "", fmt.Errorf("the referenced output image stream %s/%s does not exist", namespace, streamName) - } - return "", fmt.Errorf("the referenced output image stream %s/%s could not be found by build %s/%s: %v", namespace, streamName, build.Namespace, build.Name, err) - } - if len(stream.Status.DockerImageRepository) == 0 { - e := fmt.Errorf("the image stream %s/%s cannot be used as the output for build %s/%s because the integrated Docker registry is not configured and no external registry was defined", namespace, outputTo.Name, build.Namespace, build.Name) - bc.Recorder.Eventf(build, kapi.EventTypeWarning, "invalidOutput", "Error starting build: %v", e) - return "", e - } - ref = fmt.Sprintf("%s%s", stream.Status.DockerImageRepository, tag) - } - return ref, nil -} - -// isBuildCancellable checks for build status and returns true if the condition is checked. 
-func isBuildCancellable(build *buildapi.Build) bool { - return build.Status.Phase == buildapi.BuildPhaseNew || build.Status.Phase == buildapi.BuildPhasePending || build.Status.Phase == buildapi.BuildPhaseRunning -} - -// BuildDeleteController watches for builds being deleted and cleans up associated pods -type BuildDeleteController struct { - PodManager podManager -} - -// HandleBuildDeletion deletes a build pod if the corresponding build has been deleted -func (bc *BuildDeleteController) HandleBuildDeletion(build *buildapi.Build) error { - glog.V(4).Infof("Handling deletion of build %s", build.Name) - if build.Spec.Strategy.JenkinsPipelineStrategy != nil { - glog.V(4).Infof("Ignoring build with jenkins pipeline strategy") - return nil - } - podName := buildapi.GetBuildPodName(build) - pod, err := bc.PodManager.GetPod(build.Namespace, podName) - if err != nil && !errors.IsNotFound(err) { - glog.V(2).Infof("Failed to find pod with name %s for build %s in namespace %s due to error: %v", podName, build.Name, build.Namespace, err) - return err - } - if pod == nil { - glog.V(2).Infof("Did not find pod with name %s for build %s in namespace %s", podName, build.Name, build.Namespace) - return nil - } - if buildName := buildapi.GetBuildName(pod); buildName != build.Name { - glog.V(2).Infof("Not deleting pod %s/%s because the build label %s does not match the build name %s", pod.Namespace, podName, buildName, build.Name) - return nil - } - err = bc.PodManager.DeletePod(build.Namespace, pod) - if err != nil && !errors.IsNotFound(err) { - glog.V(2).Infof("Failed to delete pod %s/%s for build %s due to error: %v", build.Namespace, podName, build.Name, err) - return err - } - return nil -} - -// buildKey returns a build object that can be used to lookup a build -// in the cache store, given a pod for the build -func buildKey(pod *kapi.Pod) *buildapi.Build { - return &buildapi.Build{ - ObjectMeta: metav1.ObjectMeta{ - Name: buildutil.GetBuildName(pod), - Namespace: pod.Namespace, - }, - } -} diff --git a/pkg/build/controller/build_controller_test.go b/pkg/build/controller/build_controller_test.go deleted file mode 100644 index 854104334ac5..000000000000 --- a/pkg/build/controller/build_controller_test.go +++ /dev/null @@ -1,762 +0,0 @@ -package controller - -import ( - "errors" - "reflect" - "testing" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - kapi "k8s.io/kubernetes/pkg/api" - - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" - "github.com/openshift/origin/pkg/build/controller/policy" - imageapi "github.com/openshift/origin/pkg/image/api" -) - -type okBuildUpdater struct{} - -func (okc *okBuildUpdater) Update(namespace string, build *buildapi.Build) error { - return nil -} - -type okBuildLister struct{} - -func (okc *okBuildLister) List(namespace string, opts metav1.ListOptions) (*buildapi.BuildList, error) { - return &buildapi.BuildList{Items: []buildapi.Build{}}, nil -} - -type okBuildDeleter struct{} - -func (okc *okBuildDeleter) DeleteBuild(*buildapi.Build) error { - return nil -} - -type okBuildConfigGetter struct { - BuildConfig *buildapi.BuildConfig -} - -func (okc *okBuildConfigGetter) Get(namespace, name string, options metav1.GetOptions) (*buildapi.BuildConfig, error) { - if okc.BuildConfig != nil { - return okc.BuildConfig, nil - } else { - return &buildapi.BuildConfig{}, nil - } -} - -type errBuildUpdater struct{} - -func (ec 
*errBuildUpdater) Update(namespace string, build *buildapi.Build) error { - return errors.New("UpdateBuild error!") -} - -type okStrategy struct { - build *buildapi.Build -} - -func (os *okStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { - os.build = build - return &kapi.Pod{}, nil -} - -type errStrategy struct{} - -func (es *errStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { - return nil, errors.New("CreateBuildPod error!") -} - -type okPodManager struct{} - -func (*okPodManager) CreatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) { - return &kapi.Pod{}, nil -} - -func (*okPodManager) DeletePod(namespace string, pod *kapi.Pod) error { - return nil -} - -func (*okPodManager) GetPod(namespace, name string) (*kapi.Pod, error) { - return &kapi.Pod{}, nil -} - -type errPodManager struct{} - -func (*errPodManager) CreatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) { - return &kapi.Pod{}, errors.New("CreatePod error!") -} - -func (*errPodManager) DeletePod(namespace string, pod *kapi.Pod) error { - return errors.New("DeletePod error!") -} - -func (*errPodManager) GetPod(namespace, name string) (*kapi.Pod, error) { - return nil, errors.New("GetPod error!") -} - -type errExistsPodManager struct{} - -func (*errExistsPodManager) CreatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) { - return &kapi.Pod{}, kerrors.NewAlreadyExists(kapi.Resource("Pod"), "name") -} - -func (*errExistsPodManager) DeletePod(namespace string, pod *kapi.Pod) error { - return kerrors.NewNotFound(kapi.Resource("Pod"), "name") -} - -func (*errExistsPodManager) GetPod(namespace, name string) (*kapi.Pod, error) { - return nil, kerrors.NewNotFound(kapi.Resource("Pod"), "name") -} - -type okImageStreamClient struct{} - -func (*okImageStreamClient) GetImageStream(namespace, name string) (*imageapi.ImageStream, error) { - return &imageapi.ImageStream{ - ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, - Status: imageapi.ImageStreamStatus{ - DockerImageRepository: "image/repo", - }, - }, nil -} - -type errImageStreamClient struct{} - -func (*errImageStreamClient) GetImageStream(namespace, name string) (*imageapi.ImageStream, error) { - return nil, errors.New("GetImageStream error!") -} - -type errNotFoundImageStreamClient struct{} - -func (*errNotFoundImageStreamClient) GetImageStream(namespace, name string) (*imageapi.ImageStream, error) { - return nil, kerrors.NewNotFound(imageapi.Resource("ImageStream"), name) -} - -func mockBuild(phase buildapi.BuildPhase, output buildapi.BuildOutput) *buildapi.Build { - return &buildapi.Build{ - ObjectMeta: metav1.ObjectMeta{ - Name: "data-build", - Namespace: "namespace", - Annotations: map[string]string{ - buildapi.BuildConfigAnnotation: "test-bc", - }, - Labels: map[string]string{ - "name": "dataBuild", - // TODO: Switch this test to use Serial policy - buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel), - buildapi.BuildConfigLabel: "test-bc", - }, - }, - Spec: buildapi.BuildSpec{ - CommonSpec: buildapi.CommonSpec{ - Source: buildapi.BuildSource{ - Git: &buildapi.GitBuildSource{ - URI: "http://my.build.com/the/build/Dockerfile", - }, - ContextDir: "contextimage", - }, - Strategy: buildapi.BuildStrategy{ - DockerStrategy: &buildapi.DockerBuildStrategy{}, - }, - Output: output, - }, - }, - Status: buildapi.BuildStatus{ - Phase: phase, - }, - } -} - -func mockBuildController() *BuildController { - return &BuildController{ - BuildUpdater: &okBuildUpdater{}, - BuildLister: &okBuildLister{}, - 
BuildDeleter: &okBuildDeleter{}, - BuildConfigGetter: &okBuildConfigGetter{}, - PodManager: &okPodManager{}, - BuildStrategy: &okStrategy{}, - ImageStreamClient: &okImageStreamClient{}, - Recorder: &record.FakeRecorder{}, - RunPolicies: policy.GetAllRunPolicies(&okBuildLister{}, &okBuildUpdater{}), - } -} - -func mockPod(status kapi.PodPhase, exitCode int) *kapi.Pod { - return &kapi.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "data-build-build", - Annotations: map[string]string{ - buildapi.BuildAnnotation: "data-build", - }, - }, - Status: kapi.PodStatus{ - Phase: status, - ContainerStatuses: []kapi.ContainerStatus{ - { - State: kapi.ContainerState{ - Terminated: &kapi.ContainerStateTerminated{ExitCode: int32(exitCode)}, - }, - }, - }, - }, - } -} - -func TestHandleBuild(t *testing.T) { - type handleBuildTest struct { - inStatus buildapi.BuildPhase - outStatus buildapi.BuildPhase - buildOutput buildapi.BuildOutput - buildStrategy BuildStrategy - buildUpdater buildclient.BuildUpdater - imageClient imageStreamClient - podManager podManager - outputSpec string - errExpected bool - } - - tests := []handleBuildTest{ - { // 0 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhasePending, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 1 - inStatus: buildapi.BuildPhasePending, - outStatus: buildapi.BuildPhasePending, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 2 - inStatus: buildapi.BuildPhaseRunning, - outStatus: buildapi.BuildPhaseRunning, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 3 - inStatus: buildapi.BuildPhaseComplete, - outStatus: buildapi.BuildPhaseComplete, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 4 - inStatus: buildapi.BuildPhaseFailed, - outStatus: buildapi.BuildPhaseFailed, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 5 - inStatus: buildapi.BuildPhaseError, - outStatus: buildapi.BuildPhaseError, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 6 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - buildStrategy: &errStrategy{}, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - errExpected: true, - }, - { // 7 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - podManager: &errPodManager{}, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - errExpected: true, - }, - { // 8 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - podManager: &errExistsPodManager{}, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 9 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhasePending, - buildUpdater: &errBuildUpdater{}, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "DockerImage", - Name: "repository/dataBuild", - }, - }, - }, - { // 10 - 
inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhasePending, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "ImageStreamTag", - Name: "foo:tag", - }, - }, - outputSpec: "image/repo:tag", - }, - { // 11 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhasePending, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "ImageStreamTag", - Name: "foo:tag", - Namespace: "bar", - }, - }, - outputSpec: "image/repo:tag", - }, - { // 12 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - imageClient: &errNotFoundImageStreamClient{}, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "ImageStreamTag", - Name: "foo:tag", - }, - }, - errExpected: true, - }, - { // 13 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - imageClient: &errImageStreamClient{}, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "ImageStreamTag", - Name: "foo:tag", - }, - }, - errExpected: true, - }, - { // 14 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhasePending, - buildOutput: buildapi.BuildOutput{ - To: &kapi.ObjectReference{ - Kind: "ImageStreamTag", - Name: "foo:tag", - Namespace: "bar", - }, - }, - outputSpec: "image/repo:tag", - // an error updating the build is not reported as an error. - buildUpdater: &errBuildUpdater{}, - }, - } - - for i, tc := range tests { - build := mockBuild(tc.inStatus, tc.buildOutput) - ctrl := mockBuildController() - if tc.buildStrategy != nil { - ctrl.BuildStrategy = tc.buildStrategy - } - if tc.buildUpdater != nil { - ctrl.BuildUpdater = tc.buildUpdater - } - if tc.podManager != nil { - ctrl.PodManager = tc.podManager - } - if tc.imageClient != nil { - ctrl.ImageStreamClient = tc.imageClient - } - // create a copy of the build before passing it to HandleBuild - // so that we can compare it later to see if it was mutated - copy, err := kapi.Scheme.Copy(build) - - if err != nil { - t.Errorf("(%d) Failed to copy build: %#v with err: %#v", i, build, err) - continue - } - originalBuild := copy.(*buildapi.Build) - err = ctrl.HandleBuild(build) - - // ensure we return an error for cases where expected output is an error. 
- // these will be retried by the retrycontroller - if tc.errExpected && err == nil { - t.Errorf("(%d) Expected an error from HandleBuild, got none!", i) - } - - if !tc.errExpected && err != nil { - t.Errorf("(%d) Unexpected error %v", i, err) - } - if build.Status.Phase != tc.outStatus { - t.Errorf("(%d) Expected %s, got %s!", i, tc.outStatus, build.Status.Phase) - } - if tc.inStatus != buildapi.BuildPhaseError && build.Status.Phase == buildapi.BuildPhaseError && len(build.Status.Message) == 0 { - t.Errorf("(%d) errored build should set message: %#v", i, build) - } - - if !reflect.DeepEqual(build.Spec, originalBuild.Spec) { - t.Errorf("(%d) build.Spec mutated: expected %#v, got %#v", i, originalBuild.Spec, build.Spec) - } - - if len(tc.outputSpec) != 0 { - build := ctrl.BuildStrategy.(*okStrategy).build - if build == nil { - t.Errorf("(%d) unable to cast build", i) - } - - if build.Spec.Output.To.Name != tc.outputSpec { - t.Errorf("(%d) expected build sent to strategy to have docker spec %s, got %s", i, tc.outputSpec, build.Spec.Output.To.Name) - } - - if build.Status.OutputDockerImageReference != tc.outputSpec { - t.Errorf("(%d) expected build status to have OutputDockerImageReference %s, got %s", i, tc.outputSpec, build.Status.OutputDockerImageReference) - } - } - } -} - -func TestCancelBuild(t *testing.T) { - type handleCancelBuildTest struct { - inStatus buildapi.BuildPhase - outStatus buildapi.BuildPhase - podStatus kapi.PodPhase - exitCode int - buildUpdater buildclient.BuildUpdater - podManager podManager - startTimestamp *metav1.Time - completionTimestamp *metav1.Time - } - dummy := metav1.Now() - curtime := &dummy - - tests := []handleCancelBuildTest{ - { // 0 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseCancelled, - exitCode: 0, - startTimestamp: curtime, - completionTimestamp: curtime, - }, - { // 1 - inStatus: buildapi.BuildPhasePending, - outStatus: buildapi.BuildPhaseCancelled, - podStatus: kapi.PodRunning, - exitCode: 0, - startTimestamp: curtime, - completionTimestamp: curtime, - }, - { // 2 - inStatus: buildapi.BuildPhaseRunning, - outStatus: buildapi.BuildPhaseCancelled, - podStatus: kapi.PodRunning, - exitCode: 0, - startTimestamp: curtime, - completionTimestamp: curtime, - }, - { // 3 - inStatus: buildapi.BuildPhaseComplete, - outStatus: buildapi.BuildPhaseComplete, - podStatus: kapi.PodSucceeded, - exitCode: 0, - startTimestamp: nil, - completionTimestamp: nil, - }, - { // 4 - inStatus: buildapi.BuildPhaseFailed, - outStatus: buildapi.BuildPhaseFailed, - podStatus: kapi.PodFailed, - exitCode: 1, - startTimestamp: nil, - completionTimestamp: nil, - }, - { // 5 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - podStatus: kapi.PodFailed, - exitCode: 1, - podManager: &errPodManager{}, - startTimestamp: nil, - completionTimestamp: nil, - }, - { // 6 - inStatus: buildapi.BuildPhaseNew, - outStatus: buildapi.BuildPhaseNew, - podStatus: kapi.PodFailed, - exitCode: 1, - buildUpdater: &errBuildUpdater{}, - startTimestamp: nil, - completionTimestamp: nil, - }, - { // 7 - inStatus: buildapi.BuildPhaseCancelled, - outStatus: buildapi.BuildPhaseCancelled, - exitCode: 0, - startTimestamp: nil, - completionTimestamp: nil, - }, - } - - for i, tc := range tests { - build := mockBuild(tc.inStatus, buildapi.BuildOutput{}) - ctrl := mockBuildController() - if tc.buildUpdater != nil { - ctrl.BuildUpdater = tc.buildUpdater - } - if tc.podManager != nil { - ctrl.PodManager = tc.podManager - } - - err := ctrl.CancelBuild(build) - - if 
tc.podManager != nil && reflect.TypeOf(tc.podManager).Elem().Name() == "errPodManager" { - if err == nil { - t.Errorf("(%d) Expected error, got none", i) - } - } - if tc.buildUpdater != nil && reflect.TypeOf(tc.buildUpdater).Elem().Name() == "errBuildUpdater" { - if err == nil { - t.Errorf("(%d) Expected error, got none", i) - } - // can't check tc.outStatus because the local build object does get updated - // in this test (but would not be updated in etcd) - continue - } - - if tc.startTimestamp == nil && build.Status.StartTimestamp != nil { - t.Errorf("(%d) Expected nil start timestamp, got %v!", i, build.Status.StartTimestamp) - } - if tc.startTimestamp != nil && build.Status.StartTimestamp == nil { - t.Errorf("(%d) nil start timestamp!", i) - } - if tc.startTimestamp != nil && !tc.startTimestamp.Before(*build.Status.StartTimestamp) && tc.startTimestamp.Time != build.Status.StartTimestamp.Time { - t.Errorf("(%d) Expected build start timestamp %v to be equal to or later than %v!", i, build.Status.StartTimestamp, tc.startTimestamp) - } - - if tc.completionTimestamp == nil && build.Status.CompletionTimestamp != nil { - t.Errorf("(%d) Expected nil completion timestamp, got %v!", i, build.Status.CompletionTimestamp) - } - if tc.completionTimestamp != nil && build.Status.CompletionTimestamp == nil { - t.Errorf("(%d) nil start timestamp!", i) - } - if tc.completionTimestamp != nil && !tc.completionTimestamp.Before(*build.Status.CompletionTimestamp) && tc.completionTimestamp.Time != build.Status.CompletionTimestamp.Time { - t.Errorf("(%d) Expected build completion timestamp %v to be equal to or later than %v!", i, build.Status.CompletionTimestamp, tc.completionTimestamp) - } - - if build.Status.Phase != tc.outStatus { - t.Errorf("(%d) Expected %s, got %s!", i, tc.outStatus, build.Status.Phase) - } - } -} - -type customPodManager struct { - CreatePodFunc func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) - DeletePodFunc func(namespace string, pod *kapi.Pod) error - GetPodFunc func(namespace, name string) (*kapi.Pod, error) -} - -func (c *customPodManager) CreatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) { - return c.CreatePodFunc(namespace, pod) -} - -func (c *customPodManager) DeletePod(namespace string, pod *kapi.Pod) error { - return c.DeletePodFunc(namespace, pod) -} - -func (c *customPodManager) GetPod(namespace, name string) (*kapi.Pod, error) { - return c.GetPodFunc(namespace, name) -} - -func TestHandleHandleBuildDeletionOK(t *testing.T) { - deleteWasCalled := false - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { - return &kapi.Pod{ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, - Annotations: map[string]string{buildapi.BuildAnnotation: build.Name}, - }}, nil - }, - DeletePodFunc: func(namespace string, pod *kapi.Pod) error { - deleteWasCalled = true - return nil - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if !deleteWasCalled { - t.Error("DeletePod was not called when it should have been!") - } -} - -func TestHandleHandlePipelineBuildDeletionOK(t *testing.T) { - deleteWasCalled := false - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - build.Spec.Strategy.JenkinsPipelineStrategy = &buildapi.JenkinsPipelineBuildStrategy{} - ctrl := 
BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { - return &kapi.Pod{ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, - Annotations: map[string]string{buildapi.BuildAnnotation: build.Name}, - }}, nil - }, - DeletePodFunc: func(namespace string, pod *kapi.Pod) error { - deleteWasCalled = true - return nil - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if deleteWasCalled { - t.Error("DeletePod was called when it should not have been!") - } -} - -func TestHandleHandleBuildDeletionOKDeprecatedLabel(t *testing.T) { - deleteWasCalled := false - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { - return &kapi.Pod{ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, - Annotations: map[string]string{buildapi.BuildAnnotation: build.Name}, - }}, nil - }, - DeletePodFunc: func(namespace string, pod *kapi.Pod) error { - deleteWasCalled = true - return nil - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if !deleteWasCalled { - t.Error("DeletePod was not called when it should have been!") - } -} - -func TestHandleHandleBuildDeletionFailGetPod(t *testing.T) { - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, name string) (*kapi.Pod, error) { - return nil, errors.New("random") - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err == nil { - t.Error("Expected random error got none!") - } -} - -func TestHandleHandleBuildDeletionGetPodNotFound(t *testing.T) { - deleteWasCalled := false - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, name string) (*kapi.Pod, error) { - return nil, kerrors.NewNotFound(kapi.Resource("Pod"), name) - }, - DeletePodFunc: func(namespace string, pod *kapi.Pod) error { - deleteWasCalled = true - return nil - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err != nil { - t.Errorf("Unexpected error, %v", err) - } - if deleteWasCalled { - t.Error("DeletePod was called when it should not have been!") - } -} - -func TestHandleHandleBuildDeletionMismatchedLabels(t *testing.T) { - deleteWasCalled := false - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { - return &kapi.Pod{}, nil - }, - DeletePodFunc: func(namespace string, pod *kapi.Pod) error { - deleteWasCalled = true - return nil - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if deleteWasCalled { - t.Error("DeletePod was called when it should not have been!") - } -} - -func TestHandleHandleBuildDeletionDeletePodError(t *testing.T) { - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := BuildDeleteController{&customPodManager{ - GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { - return &kapi.Pod{ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, - 
Annotations: map[string]string{buildapi.BuildAnnotation: build.Name}, - }}, nil - }, - DeletePodFunc: func(namespace string, pod *kapi.Pod) error { - return errors.New("random") - }, - }} - - err := ctrl.HandleBuildDeletion(build) - if err == nil { - t.Error("Expected random error got none!") - } -} - -type customBuildUpdater struct { - UpdateFunc func(namespace string, build *buildapi.Build) error -} - -func (c *customBuildUpdater) Update(namespace string, build *buildapi.Build) error { - return c.UpdateFunc(namespace, build) -} diff --git a/pkg/build/controller/buildconfig/buildconfig_controller.go b/pkg/build/controller/buildconfig/buildconfig_controller.go new file mode 100644 index 000000000000..18e36f0a317b --- /dev/null +++ b/pkg/build/controller/buildconfig/buildconfig_controller.go @@ -0,0 +1,273 @@ +package controller + +import ( + "fmt" + "time" + + "github.com/golang/glog" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + clientv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + kapi "k8s.io/kubernetes/pkg/api" + kexternalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + kcontroller "k8s.io/kubernetes/pkg/controller" + + buildapi "github.com/openshift/origin/pkg/build/api" + buildclient "github.com/openshift/origin/pkg/build/client" + cachebuildclient "github.com/openshift/origin/pkg/build/client/cache" + buildutil "github.com/openshift/origin/pkg/build/controller/common" + buildgenerator "github.com/openshift/origin/pkg/build/generator" + osclient "github.com/openshift/origin/pkg/client" + "github.com/openshift/origin/pkg/controller/shared" +) + +const ( + maxRetries = 15 +) + +// configControllerFatalError represents a fatal error while generating a build. +// An operation that fails because of a fatal error should not be retried. 
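// Minimal, self-contained sketch of the fatal-vs-retryable error pattern that
// the configControllerFatalError type defined next implements: fatal errors
// carry a dedicated type, callers detect them with a type assertion, and only
// non-fatal errors are retried. The names fatalError, isFatal and handleResult
// are illustrative and do not appear in this package.
package main

import (
	"errors"
	"fmt"
)

type fatalError struct{ reason string }

func (e *fatalError) Error() string { return "fatal: " + e.reason }

// isFatal reports whether err is a fatalError.
func isFatal(err error) bool {
	_, ok := err.(*fatalError)
	return ok
}

// handleResult mirrors the retry decision a controller makes after one sync
// attempt: drop fatal errors, requeue everything else.
func handleResult(err error) string {
	switch {
	case err == nil:
		return "done"
	case isFatal(err):
		return "give up"
	default:
		return "requeue"
	}
}

func main() {
	fmt.Println(handleResult(nil))                                      // done
	fmt.Println(handleResult(errors.New("apiserver timeout")))          // requeue
	fmt.Println(handleResult(&fatalError{reason: "bad build config"}))  // give up
}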
+type configControllerFatalError struct { + // Reason the fatal error occurred + reason string +} + +// Error returns the error string for this fatal error +func (e *configControllerFatalError) Error() string { + return fmt.Sprintf("fatal: %s", e.reason) +} + +// IsFatal returns true if err is a fatal error +func IsFatal(err error) bool { + _, isFatal := err.(*configControllerFatalError) + return isFatal +} + +type BuildConfigController struct { + buildConfigInstantiator buildclient.BuildConfigInstantiator + buildConfigGetter buildclient.BuildConfigGetter + buildLister buildclient.BuildLister + buildDeleter buildclient.BuildDeleter + + buildConfigInformer cache.SharedIndexInformer + + queue workqueue.RateLimitingInterface + + buildConfigStoreSynced func() bool + + recorder record.EventRecorder +} + +func NewBuildConfigController(openshiftClient osclient.Interface, kubeExternalClient kexternalclientset.Interface, buildConfigInformer shared.BuildConfigInformer, buildInformer shared.BuildInformer) *BuildConfigController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeExternalClient.Core().RESTClient()).Events("")}) + + buildClient := buildclient.NewOSClientBuildClient(openshiftClient) + buildConfigGetter := cachebuildclient.NewBuildConfigGetter(buildConfigInformer.Lister()) + buildConfigInstantiator := buildclient.NewOSClientBuildConfigInstantiatorClient(openshiftClient) + // TODO: Switch to using the cache build lister when we figure out + // what is wrong with retrieving by index + // buildLister := cachebuildclient.NewBuildLister(buildInformer.Lister()) + _ = cachebuildclient.NewBuildLister(buildInformer.Lister()) + buildLister := buildClient + + c := &BuildConfigController{ + buildConfigGetter: buildConfigGetter, + buildLister: buildLister, + buildDeleter: buildClient, + buildConfigInstantiator: buildConfigInstantiator, + + buildConfigInformer: buildConfigInformer.Informer(), + + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + recorder: eventBroadcaster.NewRecorder(kapi.Scheme, clientv1.EventSource{Component: "buildconfig-controller"}), + } + + c.buildConfigInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: c.buildConfigUpdated, + AddFunc: c.buildConfigAdded, + }) + + c.buildConfigStoreSynced = c.buildConfigInformer.HasSynced + return c +} + +func (c *BuildConfigController) handleBuildConfig(bc *buildapi.BuildConfig) error { + glog.V(4).Infof("Handling BuildConfig %s", bcDesc(bc)) + + if err := buildutil.HandleBuildPruning(bc.Name, bc.Namespace, c.buildLister, c.buildConfigGetter, c.buildDeleter); err != nil { + utilruntime.HandleError(err) + } + + hasChangeTrigger := buildapi.HasTriggerType(buildapi.ConfigChangeBuildTriggerType, bc) + + if !hasChangeTrigger { + return nil + } + + if bc.Status.LastVersion > 0 { + return nil + } + + glog.V(4).Infof("Running build for BuildConfig %s", bcDesc(bc)) + + buildTriggerCauses := []buildapi.BuildTriggerCause{} + // instantiate new build + lastVersion := int64(0) + request := &buildapi.BuildRequest{ + TriggeredBy: append(buildTriggerCauses, + buildapi.BuildTriggerCause{ + Message: buildapi.BuildTriggerCauseConfigMsg, + }), + ObjectMeta: metav1.ObjectMeta{ + Name: bc.Name, + Namespace: bc.Namespace, + }, + LastVersion: &lastVersion, + } + if _, err := c.buildConfigInstantiator.Instantiate(bc.Namespace, request); err != nil { + var instantiateErr error + if kerrors.IsConflict(err) { + instantiateErr = 
fmt.Errorf("unable to instantiate Build for BuildConfig %s due to a conflicting update: %v", bcDesc(bc), err) + utilruntime.HandleError(instantiateErr) + } else if buildgenerator.IsFatal(err) || kerrors.IsNotFound(err) || kerrors.IsBadRequest(err) || kerrors.IsForbidden(err) { + instantiateErr = fmt.Errorf("gave up on Build for BuildConfig %s due to fatal error: %v", bcDesc(bc), err) + utilruntime.HandleError(instantiateErr) + c.recorder.Event(bc, kapi.EventTypeWarning, "BuildConfigInstantiateFailed", instantiateErr.Error()) + return &configControllerFatalError{err.Error()} + } else { + instantiateErr = fmt.Errorf("error instantiating Build from BuildConfig %s: %v", bcDesc(bc), err) + c.recorder.Event(bc, kapi.EventTypeWarning, "BuildConfigInstantiateFailed", instantiateErr.Error()) + utilruntime.HandleError(instantiateErr) + } + return instantiateErr + } + return nil +} + +// buildConfigAdded is called by the buildconfig informer event handler whenever a +// buildconfig is created +func (c *BuildConfigController) buildConfigAdded(obj interface{}) { + bc := obj.(*buildapi.BuildConfig) + c.enqueueBuildConfig(bc) +} + +// buildConfigUpdated gets called by the buildconfig informer event handler whenever a +// buildconfig is updated or there is a relist of buildconfigs +func (c *BuildConfigController) buildConfigUpdated(old, cur interface{}) { + bc := cur.(*buildapi.BuildConfig) + c.enqueueBuildConfig(bc) +} + +// enqueueBuild adds the given build to the queue. +func (c *BuildConfigController) enqueueBuildConfig(bc *buildapi.BuildConfig) { + key, err := kcontroller.KeyFunc(bc) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for buildconfig %#v: %v", bc, err)) + return + } + c.queue.Add(key) +} + +// Run begins watching and syncing. +func (c *BuildConfigController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + // Wait for the controller stores to sync before starting any work in this controller. + if !cache.WaitForCacheSync(stopCh, c.buildConfigStoreSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return + } + + glog.Infof("Starting buildconfig controller") + + for i := 0; i < workers; i++ { + go wait.Until(c.worker, time.Second, stopCh) + } + + <-stopCh + glog.Infof("Shutting down buildconfig controller") +} + +func (c *BuildConfigController) worker() { + for { + if quit := c.work(); quit { + return + } + } +} + +// work gets the next build from the queue and invokes handleBuild on it +func (c *BuildConfigController) work() bool { + key, quit := c.queue.Get() + if quit { + return true + } + + defer c.queue.Done(key) + + bc, err := c.getBuildConfigByKey(key.(string)) + if err != nil { + c.handleError(err, key) + return false + } + if bc == nil { + return false + } + + err = c.handleBuildConfig(bc) + c.handleError(err, key) + + return false +} + +// handleError is called by the main work loop to check the return of calling handleBuildConfig +// If an error occurred, then the key is re-added to the queue unless it has been retried too many +// times. 
+func (c *BuildConfigController) handleError(err error, key interface{}) { + if err == nil { + c.queue.Forget(key) + return + } + + if IsFatal(err) { + glog.V(2).Infof("Will not retry fatal error for key %v: %v", key, err) + c.queue.Forget(key) + return + } + + if c.queue.NumRequeues(key) < maxRetries { + glog.V(4).Infof("Retrying key %v: %v", key, err) + c.queue.AddRateLimited(key) + return + } + + glog.V(2).Infof("Giving up retrying %v: %v", key, err) + c.queue.Forget(key) +} + +// getBuildConfigByKey looks up a buildconfig by key in the buildConfigInformer cache +func (c *BuildConfigController) getBuildConfigByKey(key string) (*buildapi.BuildConfig, error) { + obj, exists, err := c.buildConfigInformer.GetIndexer().GetByKey(key) + if err != nil { + glog.V(2).Infof("Unable to retrieve buildconfig %s from store: %v", key, err) + return nil, err + } + if !exists { + glog.V(2).Infof("Buildconfig %q has been deleted", key) + return nil, nil + } + + return obj.(*buildapi.BuildConfig), nil +} + +func bcDesc(bc *buildapi.BuildConfig) string { + return fmt.Sprintf("%s/%s (%d)", bc.Namespace, bc.Name, bc.Status.LastVersion) +} diff --git a/pkg/build/controller/buildconfig_controller_test.go b/pkg/build/controller/buildconfig/buildconfig_controller_test.go similarity index 73% rename from pkg/build/controller/buildconfig_controller_test.go rename to pkg/build/controller/buildconfig/buildconfig_controller_test.go index 6f78d8deb4d6..43bfbbcfa2e5 100644 --- a/pkg/build/controller/buildconfig_controller_test.go +++ b/pkg/build/controller/buildconfig/buildconfig_controller_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" buildapi "github.com/openshift/origin/pkg/build/api" @@ -45,13 +46,13 @@ func TestHandleBuildConfig(t *testing.T) { err: tc.instantiatorError, } controller := &BuildConfigController{ - BuildConfigInstantiator: instantiator, - BuildLister: &okBuildLister{}, - BuildDeleter: &okBuildDeleter{}, - BuildConfigGetter: &okBuildConfigGetter{BuildConfig: tc.bc}, - Recorder: &record.FakeRecorder{}, + buildConfigInstantiator: instantiator, + buildLister: &okBuildLister{}, + buildDeleter: &okBuildDeleter{}, + buildConfigGetter: &okBuildConfigGetter{BuildConfig: tc.bc}, + recorder: &record.FakeRecorder{}, } - err := controller.HandleBuildConfig(tc.bc) + err := controller.handleBuildConfig(tc.bc) if err != nil { if !tc.expectErr { t.Errorf("%s: unexpected error: %v", tc.name, err) @@ -107,3 +108,27 @@ func buildConfigWithNonZeroLastVersion() *buildapi.BuildConfig { bc.Status.LastVersion = 1 return bc } + +type okBuildLister struct{} + +func (okc *okBuildLister) List(namespace string, opts metav1.ListOptions) (*buildapi.BuildList, error) { + return &buildapi.BuildList{Items: []buildapi.Build{}}, nil +} + +type okBuildDeleter struct{} + +func (okc *okBuildDeleter) DeleteBuild(*buildapi.Build) error { + return nil +} + +type okBuildConfigGetter struct { + BuildConfig *buildapi.BuildConfig +} + +func (okc *okBuildConfigGetter) Get(namespace, name string, options metav1.GetOptions) (*buildapi.BuildConfig, error) { + if okc.BuildConfig != nil { + return okc.BuildConfig, nil + } else { + return &buildapi.BuildConfig{}, nil + } +} diff --git a/pkg/build/controller/buildconfig_controller.go b/pkg/build/controller/buildconfig_controller.go deleted file mode 100644 index ea28c50b8ea5..000000000000 --- a/pkg/build/controller/buildconfig_controller.go +++ /dev/null @@ -1,97 +0,0 @@ -package controller - -import ( - "fmt" - - 
"github.com/golang/glog" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/record" - kapi "k8s.io/kubernetes/pkg/api" - - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" - buildutil "github.com/openshift/origin/pkg/build/controller/common" - buildgenerator "github.com/openshift/origin/pkg/build/generator" -) - -// ConfigControllerFatalError represents a fatal error while generating a build. -// An operation that fails because of a fatal error should not be retried. -type ConfigControllerFatalError struct { - // Reason the fatal error occurred - Reason string -} - -// Error returns the error string for this fatal error -func (e *ConfigControllerFatalError) Error() string { - return fmt.Sprintf("fatal error processing BuildConfig: %s", e.Reason) -} - -// IsFatal returns true if err is a fatal error -func IsFatal(err error) bool { - _, isFatal := err.(*ConfigControllerFatalError) - return isFatal -} - -type BuildConfigController struct { - BuildConfigInstantiator buildclient.BuildConfigInstantiator - BuildConfigGetter buildclient.BuildConfigGetter - BuildLister buildclient.BuildLister - BuildDeleter buildclient.BuildDeleter - // recorder is used to record events. - Recorder record.EventRecorder -} - -func (c *BuildConfigController) HandleBuildConfig(bc *buildapi.BuildConfig) error { - glog.V(4).Infof("Handling BuildConfig %s/%s", bc.Namespace, bc.Name) - - if err := buildutil.HandleBuildPruning(bc.Name, bc.Namespace, c.BuildLister, c.BuildConfigGetter, c.BuildDeleter); err != nil { - utilruntime.HandleError(err) - } - - hasChangeTrigger := buildapi.HasTriggerType(buildapi.ConfigChangeBuildTriggerType, bc) - - if !hasChangeTrigger { - return nil - } - - if bc.Status.LastVersion > 0 { - return nil - } - - glog.V(4).Infof("Running build for BuildConfig %s/%s", bc.Namespace, bc.Name) - - buildTriggerCauses := []buildapi.BuildTriggerCause{} - // instantiate new build - lastVersion := int64(0) - request := &buildapi.BuildRequest{ - TriggeredBy: append(buildTriggerCauses, - buildapi.BuildTriggerCause{ - Message: buildapi.BuildTriggerCauseConfigMsg, - }), - ObjectMeta: metav1.ObjectMeta{ - Name: bc.Name, - Namespace: bc.Namespace, - }, - LastVersion: &lastVersion, - } - if _, err := c.BuildConfigInstantiator.Instantiate(bc.Namespace, request); err != nil { - var instantiateErr error - if kerrors.IsConflict(err) { - instantiateErr = fmt.Errorf("unable to instantiate Build for BuildConfig %s/%s due to a conflicting update: %v", bc.Namespace, bc.Name, err) - utilruntime.HandleError(instantiateErr) - } else if buildgenerator.IsFatal(err) || kerrors.IsNotFound(err) || kerrors.IsBadRequest(err) || kerrors.IsForbidden(err) { - instantiateErr = fmt.Errorf("gave up on Build for BuildConfig %s/%s due to fatal error: %v", bc.Namespace, bc.Name, err) - utilruntime.HandleError(instantiateErr) - c.Recorder.Event(bc, kapi.EventTypeWarning, "BuildConfigInstantiateFailed", instantiateErr.Error()) - return &ConfigControllerFatalError{err.Error()} - } else { - instantiateErr = fmt.Errorf("error instantiating Build from BuildConfig %s/%s: %v", bc.Namespace, bc.Name, err) - c.Recorder.Event(bc, kapi.EventTypeWarning, "BuildConfigInstantiateFailed", instantiateErr.Error()) - utilruntime.HandleError(instantiateErr) - } - return instantiateErr - } - return nil -} diff --git a/pkg/build/controller/buildpod/buildpod_controller.go 
b/pkg/build/controller/buildpod/buildpod_controller.go deleted file mode 100644 index 7f279fa8a80e..000000000000 --- a/pkg/build/controller/buildpod/buildpod_controller.go +++ /dev/null @@ -1,508 +0,0 @@ -package buildpod - -import ( - "fmt" - "time" - - "github.com/golang/glog" - errors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - clientv1 "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - kapi "k8s.io/kubernetes/pkg/api" - kexternalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - kcoreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion" - kcontroller "k8s.io/kubernetes/pkg/controller" - - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" - "github.com/openshift/origin/pkg/build/controller/common" - "github.com/openshift/origin/pkg/build/controller/policy" - strategy "github.com/openshift/origin/pkg/build/controller/strategy" - buildutil "github.com/openshift/origin/pkg/build/util" - osclient "github.com/openshift/origin/pkg/client" - oscache "github.com/openshift/origin/pkg/client/cache" -) - -const ( - // We must avoid processing build pods until the build and pod stores have synced. - // If they haven't synced, to avoid a hot loop, we'll wait this long between checks. - storeSyncedPollPeriod = 100 * time.Millisecond - maxRetries = 15 - maxNotFoundRetries = 5 -) - -// BuildPodController watches pods running builds and manages the build state -type BuildPodController struct { - buildUpdater buildclient.BuildUpdater - secretClient kcoreclient.SecretsGetter - podClient kcoreclient.PodsGetter - - queue workqueue.RateLimitingInterface - - buildStore oscache.StoreToBuildLister - buildLister buildclient.BuildLister - buildConfigGetter buildclient.BuildConfigGetter - buildDeleter buildclient.BuildDeleter - podInformer kcoreinformers.PodInformer - - buildStoreSynced func() bool - podStoreSynced func() bool - - runPolicies []policy.RunPolicy - - recorder record.EventRecorder -} - -// NewBuildPodController creates a new BuildPodController. 
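// Hedged sketch of the event-recorder wiring used by the constructor that
// follows (and by NewBuildConfigController earlier in this diff): a
// broadcaster forwards recorded events to the cluster's core/v1 events
// endpoint and the typed recorder stamps them with a component name.
// kubeClient is an assumed external clientset; the import aliases match the
// ones in this file.
func newBuildEventRecorderSketch(kubeClient kexternalclientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	// Send events through the external clientset's REST client, as the
	// controllers in this package do.
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
	return eventBroadcaster.NewRecorder(kapi.Scheme, clientv1.EventSource{Component: "build-pod-controller"})
}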
-func NewBuildPodController(buildInformer cache.SharedIndexInformer, podInformer kcoreinformers.PodInformer, intkc kclientset.Interface, extkc kexternalclientset.Interface, oc osclient.Interface) *BuildPodController { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(extkc.Core().RESTClient()).Events("")}) - - buildListerUpdater := buildclient.NewOSClientBuildClient(oc) - buildConfigGetter := buildclient.NewOSClientBuildConfigClient(oc) - buildDeleter := buildclient.NewBuildDeleter(oc) - - c := &BuildPodController{ - buildUpdater: buildListerUpdater, - buildLister: buildListerUpdater, - buildConfigGetter: buildConfigGetter, - buildDeleter: buildDeleter, - secretClient: intkc.Core(), // TODO: Replace with cache client - podClient: intkc.Core(), - podInformer: podInformer, - queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - recorder: eventBroadcaster.NewRecorder(kapi.Scheme, clientv1.EventSource{Component: "build-pod-controller"}), - } - - c.runPolicies = policy.GetAllRunPolicies(buildListerUpdater, buildListerUpdater) - - c.buildStore.Indexer = buildInformer.GetIndexer() - podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: c.updatePod, - DeleteFunc: c.deletePod, - }) - buildInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.addBuild, - UpdateFunc: c.updateBuild, - }) - - c.buildStoreSynced = buildInformer.HasSynced - c.podStoreSynced = podInformer.Informer().HasSynced - - return c -} - -type podNotFoundKey struct { - Namespace string - PodName string - BuildName string -} - -// Run begins watching and syncing. -func (c *BuildPodController) Run(workers int, stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - defer c.queue.ShutDown() - - // Wait for the build store to sync before starting any work in this controller. - if !cache.WaitForCacheSync(stopCh, c.buildStoreSynced, c.podStoreSynced) { - utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) - return - } - - for i := 0; i < workers; i++ { - go wait.Until(c.worker, time.Second, stopCh) - } - - <-stopCh - glog.Infof("Shutting down build pod controller") -} - -// HandlePod updates the state of the build based on the pod state -func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error { - glog.V(5).Infof("Handling update of build pod %s/%s in phase %s", pod.Namespace, pod.Name, pod.Status.Phase) - build, exists, err := bc.getBuildForPod(pod) - if err != nil { - glog.V(4).Infof("Error getting build for pod %s/%s: %v", pod.Namespace, pod.Name, err) - return err - } - if !exists || build == nil { - glog.V(5).Infof("No build found for pod %s/%s", pod.Namespace, pod.Name) - return nil - } - glog.V(5).Infof("build %s/%s is in phase %s", build.Name, build.Namespace, build.Status.Phase) - nextStatus := build.Status.Phase - currentReason := build.Status.Reason - - // if the build is marked failed, the build status reason has already been - // set (probably by the build pod itself), so don't do any updating here - // or we'll overwrite the correct value. 
- if build.Status.Phase != buildapi.BuildPhaseFailed { - switch pod.Status.Phase { - case kapi.PodPending: - build.Status.Reason = "" - build.Status.Message = "" - nextStatus = buildapi.BuildPhasePending - if secret := build.Spec.Output.PushSecret; secret != nil && currentReason != buildapi.StatusReasonMissingPushSecret { - if _, err := bc.secretClient.Secrets(build.Namespace).Get(secret.Name, metav1.GetOptions{}); err != nil && errors.IsNotFound(err) { - build.Status.Reason = buildapi.StatusReasonMissingPushSecret - build.Status.Message = buildapi.StatusMessageMissingPushSecret - glog.V(4).Infof("Setting reason for pending build to %q due to missing secret %s/%s", build.Status.Reason, build.Namespace, secret.Name) - } - } - - case kapi.PodRunning: - // The pod's still running - build.Status.Reason = "" - build.Status.Message = "" - nextStatus = buildapi.BuildPhaseRunning - - case kapi.PodSucceeded: - build.Status.Reason = "" - build.Status.Message = "" - // Check the exit codes of all the containers in the pod - nextStatus = buildapi.BuildPhaseComplete - if len(pod.Status.ContainerStatuses) == 0 { - // no containers in the pod means something went badly wrong, so the build - // should be failed. - glog.V(2).Infof("Failing build %s/%s because the pod has no containers", build.Namespace, build.Name) - nextStatus = buildapi.BuildPhaseFailed - if build.Status.Reason == "" { - build.Status.Reason = buildapi.StatusReasonBuildPodDeleted - build.Status.Message = buildapi.StatusMessageBuildPodDeleted - } - } else { - for _, info := range pod.Status.ContainerStatuses { - if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 { - nextStatus = buildapi.BuildPhaseFailed - break - } - } - } - - case kapi.PodFailed: - nextStatus = buildapi.BuildPhaseFailed - if build.Status.Reason == "" { - build.Status.Reason = buildapi.StatusReasonGenericBuildFailed - build.Status.Message = buildapi.StatusMessageGenericBuildFailed - } - - default: - build.Status.Reason = "" - build.Status.Message = "" - } - } - - needsUpdate := false - // Update the build object when it progress to a next state or the reason for - // the current state changed. Do not touch builds that are complete - // because we'd potentially be overwriting build state information set by the - // build pod directly. - if (!common.HasBuildPodNameAnnotation(build) || build.Status.Phase != nextStatus) && !buildutil.IsBuildComplete(build) { - needsUpdate = true - common.SetBuildPodNameAnnotation(build, pod.Name) - reason := "" - if len(build.Status.Reason) > 0 { - reason = " (" + string(build.Status.Reason) + ")" - } - glog.V(4).Infof("Updating build %s/%s status %s -> %s%s", build.Namespace, build.Name, build.Status.Phase, nextStatus, reason) - build.Status.Phase = nextStatus - if build.Status.Phase == buildapi.BuildPhaseRunning { - build.Status.StartTimestamp = pod.Status.StartTime - bc.recorder.Eventf(build, kapi.EventTypeNormal, buildapi.BuildStartedEventReason, fmt.Sprintf(buildapi.BuildStartedEventMessage, build.Namespace, build.Name)) - } - } - - // we're going to get pod relist events for completed/failed pods, - // there's no reason to re-update the build and rerun - // HandleBuildCompletion if we've already done it for this - // build previously. 
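// Simplified, self-contained model of the phase mapping implemented by the
// switch above, using plain strings instead of the API types and ignoring the
// push-secret check and reason/message bookkeeping. As in the real code, this
// mapping only applies when the build is not already marked Failed.
package main

import "fmt"

// nextBuildPhase maps the build pod's phase and its containers' exit codes to
// the phase the build should move to.
func nextBuildPhase(currentBuildPhase, podPhase string, exitCodes []int) string {
	switch podPhase {
	case "Pending":
		return "Pending"
	case "Running":
		return "Running"
	case "Succeeded":
		if len(exitCodes) == 0 {
			return "Failed" // no container statuses at all: treat as a failure
		}
		for _, code := range exitCodes {
			if code != 0 {
				return "Failed" // any non-zero container exit fails the build
			}
		}
		return "Complete"
	case "Failed":
		return "Failed"
	default:
		return currentBuildPhase // e.g. Unknown: leave the build phase alone
	}
}

func main() {
	fmt.Println(nextBuildPhase("Running", "Succeeded", []int{0}))    // Complete
	fmt.Println(nextBuildPhase("Running", "Succeeded", []int{0, 1})) // Failed
	fmt.Println(nextBuildPhase("Running", "Succeeded", nil))         // Failed
}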
- buildWasComplete := build.Status.CompletionTimestamp != nil - if !buildWasComplete && buildutil.IsBuildComplete(build) && build.Status.Phase != buildapi.BuildPhaseCancelled { - needsUpdate = common.SetBuildCompletionTimeAndDuration(build, pod.Status.StartTime) || needsUpdate - } - if needsUpdate { - if err := bc.buildUpdater.Update(build.Namespace, build); err != nil { - return fmt.Errorf("failed to update build %s/%s: %v", build.Namespace, build.Name, err) - } - glog.V(4).Infof("Build %s/%s status was updated to %s", build.Namespace, build.Name, build.Status.Phase) - } - // if the build was not previously marked complete but it's complete now, - // handle completion for it. otherwise ignore it because we've already - // handled its completion previously. - if !buildWasComplete && buildutil.IsBuildComplete(build) { - switch build.Status.Phase { - case buildapi.BuildPhaseComplete: - bc.recorder.Eventf(build, kapi.EventTypeNormal, buildapi.BuildCompletedEventReason, fmt.Sprintf(buildapi.BuildCompletedEventMessage, build.Namespace, build.Name)) - case buildapi.BuildPhaseError, buildapi.BuildPhaseFailed: - bc.recorder.Eventf(build, kapi.EventTypeNormal, buildapi.BuildFailedEventReason, fmt.Sprintf(buildapi.BuildFailedEventMessage, build.Namespace, build.Name)) - } - common.HandleBuildCompletion(build, bc.buildLister, bc.buildConfigGetter, bc.buildDeleter, bc.runPolicies) - } - - return nil -} - -// HandleBuildPodDeletion sets the status of a build to error if the build pod has been deleted -func (bc *BuildPodController) HandleBuildPodDeletion(pod *kapi.Pod) error { - glog.V(4).Infof("Handling deletion of build pod %s/%s", pod.Namespace, pod.Name) - build, exists, err := bc.getBuildForPod(pod) - if err != nil { - glog.V(4).Infof("Error getting build for pod %s/%s", pod.Namespace, pod.Name) - return err - } - if !exists || build == nil { - glog.V(5).Infof("No build found for deleted pod %s/%s", pod.Namespace, pod.Name) - return nil - } - - if build.Spec.Strategy.JenkinsPipelineStrategy != nil { - glog.V(4).Infof("Build %s/%s is a pipeline build, ignoring", build.Namespace, build.Name) - return nil - } - // If build was cancelled, we'll leave HandleBuild to update the build - if build.Status.Cancelled { - glog.V(4).Infof("Cancelation for build %s/%s was already triggered, ignoring", build.Namespace, build.Name) - return nil - } - - if buildutil.IsBuildComplete(build) { - glog.V(4).Infof("Pod was deleted but build %s/%s is already completed, so no need to update it.", build.Namespace, build.Name) - return nil - } - - nextStatus := buildapi.BuildPhaseError - if build.Status.Phase != nextStatus { - glog.V(4).Infof("Updating build %s/%s status %s -> %s", build.Namespace, build.Name, build.Status.Phase, nextStatus) - build.Status.Phase = nextStatus - build.Status.Reason = buildapi.StatusReasonBuildPodDeleted - build.Status.Message = buildapi.StatusMessageBuildPodDeleted - common.SetBuildCompletionTimeAndDuration(build, pod.Status.StartTime) - bc.recorder.Eventf(build, kapi.EventTypeNormal, buildapi.BuildFailedEventReason, fmt.Sprintf(buildapi.BuildFailedEventMessage, build.Namespace, build.Name)) - - if err := bc.buildUpdater.Update(build.Namespace, build); err != nil { - return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err) - } - common.HandleBuildCompletion(build, bc.buildLister, bc.buildConfigGetter, bc.buildDeleter, bc.runPolicies) - } - return nil -} - -func (bc *BuildPodController) worker() { - for { - if quit := bc.work(); quit { - return - } - } -} - -func 
(bc *BuildPodController) work() bool { - key, quit := bc.queue.Get() - if quit { - return true - } - defer bc.queue.Done(key) - - if notFoundKey, ok := key.(podNotFoundKey); ok { - bc.handlePodNotFound(notFoundKey) - return true - } - - pod, err := bc.getPodByKey(key.(string)) - if err != nil { - utilruntime.HandleError(err) - } - - if pod == nil { - return false - } - - err = bc.HandlePod(pod) - bc.handleError(err, key, pod) - - return false -} - -func podForNotFoundKey(key podNotFoundKey) *kapi.Pod { - pod := &kapi.Pod{} - pod.Namespace = key.Namespace - pod.Name = key.PodName - pod.Annotations = map[string]string{buildapi.BuildAnnotation: key.BuildName} - return pod -} - -func (bc *BuildPodController) handlePodNotFound(key podNotFoundKey) { - _, err := bc.podInformer.Lister().Pods(key.Namespace).Get(key.PodName) - if err == nil { - glog.V(4).Infof("Found missing pod %s/%s\n", key.Namespace, key.PodName) - bc.queue.Forget(key) - return - } - - // If number of retries has not been exceeded, requeue and attempt to retrieve from cache again - if bc.queue.NumRequeues(key) < maxNotFoundRetries { - glog.V(4).Infof("Failed to retrieve build pod %s/%s: %v. Retrying it.", key.Namespace, key.PodName, err) - bc.queue.AddRateLimited(key) - return - } - - // Once the maximum number of retries has been exceeded, try retrieving it from the API server directly - bc.queue.Forget(key) - _, err = bc.podClient.Pods(key.Namespace).Get(key.PodName, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { - // If the pod is still not found, handle it as a deletion event - err = bc.HandleBuildPodDeletion(podForNotFoundKey(key)) - } - utilruntime.HandleError(err) -} - -func (bc *BuildPodController) getPodByKey(key string) (*kapi.Pod, error) { - obj, exists, err := bc.podInformer.Informer().GetIndexer().GetByKey(key) - if err != nil { - glog.Infof("Unable to retrieve pod %q from store: %v", key, err) - bc.queue.AddRateLimited(key) - return nil, err - } - if !exists { - glog.Infof("Pod %q has been deleted", key) - return nil, nil - } - - return obj.(*kapi.Pod), nil -} - -func (bc *BuildPodController) updatePod(old, cur interface{}) { - // A periodic relist will send update events for all known pods. 
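// Self-contained sketch of the decision handlePodNotFound above makes when a
// build's pod is missing from the informer cache: retry the cache lookup a
// bounded number of times, then fall back to the API server, and only treat
// the pod as deleted if the API server confirms it is gone. The boolean
// parameters stand in for the cache and API lookups.
package main

import "fmt"

const maxNotFoundRetries = 5 // mirrors the constant at the top of this file

// podNotFoundAction reports what to do with a "pod not found" queue key.
func podNotFoundAction(foundInCache, foundInAPI bool, requeues int) string {
	if foundInCache {
		return "forget" // the cache caught up; nothing left to do
	}
	if requeues < maxNotFoundRetries {
		return "requeue" // give the informer cache more time
	}
	if !foundInAPI {
		return "handle as pod deletion" // the build is moved out of its running state
	}
	return "forget" // the pod exists upstream; the cache will catch up
}

func main() {
	fmt.Println(podNotFoundAction(false, false, 2)) // requeue
	fmt.Println(podNotFoundAction(false, false, 5)) // handle as pod deletion
	fmt.Println(podNotFoundAction(false, true, 5))  // forget
}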
- curPod := cur.(*kapi.Pod) - oldPod := old.(*kapi.Pod) - if curPod.ResourceVersion == oldPod.ResourceVersion { - return - } - if isBuildPod(curPod) { - bc.enqueuePod(curPod) - } -} - -func (bc *BuildPodController) deletePod(obj interface{}) { - pod, ok := obj.(*kapi.Pod) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone: %+v", obj)) - return - } - pod, ok = tombstone.Obj.(*kapi.Pod) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod: %+v", obj)) - return - } - } - if isBuildPod(pod) { - err := bc.HandleBuildPodDeletion(pod) - utilruntime.HandleError(err) - } -} - -func (bc *BuildPodController) addBuild(obj interface{}) { - build := obj.(*buildapi.Build) - bc.checkBuildPodDeletion(build) -} - -func (bc *BuildPodController) updateBuild(old, cur interface{}) { - build := cur.(*buildapi.Build) - bc.checkBuildPodDeletion(build) -} - -func (bc *BuildPodController) checkBuildPodDeletion(build *buildapi.Build) { - switch { - case buildutil.IsBuildComplete(build): - glog.V(5).Infof("checkBuildPodDeletion: ignoring build %s/%s because it is complete", build.Namespace, build.Name) - return - case build.Status.Phase == buildapi.BuildPhaseNew: - glog.V(5).Infof("checkBuildPodDeletion: ignoring build %s/%s because it is new", build.Namespace, build.Name) - return - case build.Spec.Strategy.JenkinsPipelineStrategy != nil: - glog.V(5).Infof("checkBuildPodDeletion: ignoring build %s/%s because it is a pipeline build", build.Namespace, build.Name) - return - } - _, err := bc.podInformer.Lister().Pods(build.Namespace).Get(buildapi.GetBuildPodName(build)) - - // The only error that can currently be returned is a NotFound error, but adding a check - // here just in case that changes in the future - if err != nil && errors.IsNotFound(err) { - // If the pod is not found, enqueue a pod not found event. The reason is that - // the cache may not have been populated at the time. With the pod not found key, - // we will keep trying to find the pod a fixed number of times. If at that point, the pod - // is not found by directly accessing the API server, then we will mark the build - // as failed. 
- bc.enqueuePodNotFound(build.Namespace, buildapi.GetBuildPodName(build), build.Name) - } - if err != nil { - utilruntime.HandleError(err) - } -} - -func (c *BuildPodController) enqueuePod(pod *kapi.Pod) { - key, err := kcontroller.KeyFunc(pod) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", pod, err)) - return - } - c.queue.Add(key) -} - -func (c *BuildPodController) enqueuePodNotFound(namespace, name, buildName string) { - key := podNotFoundKey{ - Namespace: namespace, - PodName: name, - BuildName: buildName, - } - c.queue.Add(key) -} - -func (bc *BuildPodController) handleError(err error, key interface{}, pod *kapi.Pod) { - if err == nil { - bc.queue.Forget(key) - return - } - - if strategy.IsFatal(err) { - glog.V(2).Infof("Will not retry fatal error for pod %s/%s: %v", pod.Namespace, pod.Name, err) - bc.queue.Forget(key) - return - } - - if bc.queue.NumRequeues(key) < maxRetries { - glog.V(4).Infof("Retrying pod %s/%s: %v", pod.Namespace, pod.Name, err) - bc.queue.AddRateLimited(key) - return - } - - glog.V(2).Infof("Giving up retrying pod %s/%s: %v", pod.Namespace, pod.Name, err) - bc.queue.Forget(key) -} - -func isBuildPod(pod *kapi.Pod) bool { - return len(buildutil.GetBuildName(pod)) > 0 -} - -func (bc *BuildPodController) getBuildForPod(pod *kapi.Pod) (*buildapi.Build, bool, error) { - if !isBuildPod(pod) { - return nil, false, fmt.Errorf("cannot get build for pod (%s/%s): pod is not a build pod", pod.Namespace, pod.Name) - } - build, err := bc.buildStore.Builds(pod.Namespace).Get(buildutil.GetBuildName(pod)) - if err != nil && errors.IsNotFound(err) { - return nil, false, nil - } - if err == nil { - build, err = buildutil.BuildDeepCopy(build) - } - return build, true, err -} diff --git a/pkg/build/controller/buildpod/buildpod_controller_test.go b/pkg/build/controller/buildpod/buildpod_controller_test.go deleted file mode 100644 index ab5bc24a389f..000000000000 --- a/pkg/build/controller/buildpod/buildpod_controller_test.go +++ /dev/null @@ -1,404 +0,0 @@ -package buildpod - -import ( - "errors" - "reflect" - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" - kapi "k8s.io/kubernetes/pkg/api" - kfakeexternal "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - kinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" - "github.com/openshift/origin/pkg/build/controller/common" - "github.com/openshift/origin/pkg/client/testclient" -) - -type errBuildUpdater struct{} - -func (ec *errBuildUpdater) Update(namespace string, build *buildapi.Build) error { - return errors.New("UpdateBuild error!") -} - -type customBuildUpdater struct { - UpdateFunc func(namespace string, build *buildapi.Build) error -} - -func (c *customBuildUpdater) Update(namespace string, build *buildapi.Build) error { - return c.UpdateFunc(namespace, build) -} - -func mockPod(status kapi.PodPhase, exitCode int, startTime *metav1.Time) *kapi.Pod { - return &kapi.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "data-build-build", - Namespace: "namespace", - Annotations: map[string]string{ - buildapi.BuildAnnotation: "data-build", - }, - }, - Status: kapi.PodStatus{ - Phase: status, - ContainerStatuses: []kapi.ContainerStatus{ - { - State: kapi.ContainerState{ - Terminated: 
&kapi.ContainerStateTerminated{ExitCode: int32(exitCode)}, - }, - }, - }, - StartTime: startTime, - }, - } -} - -func mockBuild(phase buildapi.BuildPhase, output buildapi.BuildOutput) *buildapi.Build { - return &buildapi.Build{ - ObjectMeta: metav1.ObjectMeta{ - Name: "data-build", - Namespace: "namespace", - Annotations: map[string]string{ - buildapi.BuildConfigAnnotation: "test-bc", - }, - Labels: map[string]string{ - "name": "dataBuild", - // TODO: Switch this test to use Serial policy - buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel), - buildapi.BuildConfigLabel: "test-bc", - }, - }, - Spec: buildapi.BuildSpec{ - CommonSpec: buildapi.CommonSpec{ - Source: buildapi.BuildSource{ - Git: &buildapi.GitBuildSource{ - URI: "http://my.build.com/the/build/Dockerfile", - }, - ContextDir: "contextimage", - }, - Strategy: buildapi.BuildStrategy{ - DockerStrategy: &buildapi.DockerBuildStrategy{}, - }, - Output: output, - }, - }, - Status: buildapi.BuildStatus{ - Phase: phase, - }, - } -} - -type FakeIndexer struct { - *cache.FakeCustomStore -} - -func mockBuildPodController(build *buildapi.Build, buildUpdater buildclient.BuildUpdater) *BuildPodController { - buildInformer := cache.NewSharedIndexInformer(&cache.ListWatch{}, &buildapi.Build{}, 2*time.Minute, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - kubeInformerFactory := kinformers.NewSharedInformerFactory(&fake.Clientset{}, 0) - podInformer := kubeInformerFactory.Core().InternalVersion().Pods() - fakeSecret := &kapi.Secret{} - fakeSecret.Name = "fakeSecret" - fakeSecret.Namespace = "namespace" - kclient := fake.NewSimpleClientset(fakeSecret) - osclient := testclient.NewSimpleFake() - c := NewBuildPodController(buildInformer, podInformer, kclient, kfakeexternal.NewSimpleClientset(), osclient) - if build != nil { - c.buildStore.Indexer.Add(build) - } - if buildUpdater != nil { - c.buildUpdater = buildUpdater - } - return c -} - -func TestHandlePod(t *testing.T) { - type handlePodTest struct { - matchID bool - inStatus buildapi.BuildPhase - outStatus buildapi.BuildPhase - startTimestamp *metav1.Time - completionTimestamp *metav1.Time - podStatus kapi.PodPhase - exitCode int - buildUpdater buildclient.BuildUpdater - } - - dummy := metav1.Now() - curtime := &dummy - tests := []handlePodTest{ - { // 0 - matchID: false, - inStatus: buildapi.BuildPhasePending, - outStatus: buildapi.BuildPhasePending, - podStatus: kapi.PodPending, - exitCode: 0, - startTimestamp: nil, - completionTimestamp: nil, - }, - { // 1 - matchID: true, - inStatus: buildapi.BuildPhasePending, - outStatus: buildapi.BuildPhasePending, - podStatus: kapi.PodPending, - exitCode: 0, - startTimestamp: nil, - completionTimestamp: nil, - }, - { // 2 - matchID: true, - inStatus: buildapi.BuildPhasePending, - outStatus: buildapi.BuildPhaseRunning, - podStatus: kapi.PodRunning, - exitCode: 0, - startTimestamp: curtime, - completionTimestamp: nil, - }, - { // 3 - matchID: true, - inStatus: buildapi.BuildPhaseRunning, - outStatus: buildapi.BuildPhaseComplete, - podStatus: kapi.PodSucceeded, - exitCode: 0, - startTimestamp: curtime, - completionTimestamp: curtime, - }, - { // 4 - matchID: true, - inStatus: buildapi.BuildPhaseRunning, - outStatus: buildapi.BuildPhaseFailed, - podStatus: kapi.PodFailed, - exitCode: -1, - startTimestamp: curtime, - completionTimestamp: curtime, - }, - { // 5 - matchID: true, - inStatus: buildapi.BuildPhaseRunning, - outStatus: buildapi.BuildPhaseComplete, - podStatus: kapi.PodSucceeded, - exitCode: 0, - buildUpdater: 
&errBuildUpdater{}, - startTimestamp: curtime, - completionTimestamp: curtime, - }, - { // 6 - matchID: true, - inStatus: buildapi.BuildPhaseCancelled, - outStatus: buildapi.BuildPhaseCancelled, - podStatus: kapi.PodFailed, - exitCode: 0, - startTimestamp: nil, - completionTimestamp: nil, - }, - } - - for i, tc := range tests { - build := mockBuild(tc.inStatus, buildapi.BuildOutput{}) - // Default build updater to retrieve updated build - if tc.buildUpdater == nil { - tc.buildUpdater = &customBuildUpdater{ - UpdateFunc: func(namespace string, updatedBuild *buildapi.Build) error { - build = updatedBuild - return nil - }, - } - } - ctrl := mockBuildPodController(build, tc.buildUpdater) - pod := mockPod(tc.podStatus, tc.exitCode, curtime) - if tc.matchID { - build.Name = "name" - } - - err := ctrl.HandlePod(pod) - - if tc.buildUpdater != nil && reflect.TypeOf(tc.buildUpdater).Elem().Name() == "errBuildUpdater" { - if err == nil { - t.Errorf("(%d) Expected error, got none", i) - } - // can't check tc.outStatus because the local build object does get updated - // in this test (but would not updated in etcd) - continue - } - if build.Status.Phase != tc.outStatus { - t.Errorf("(%d) Expected %s, got %s!", i, tc.outStatus, build.Status.Phase) - } - if tc.inStatus != buildapi.BuildPhaseCancelled && tc.inStatus != buildapi.BuildPhaseComplete && !common.HasBuildPodNameAnnotation(build) { - t.Errorf("(%d) Build does not have pod name annotation. %#v", i, build) - } - if tc.startTimestamp == nil && build.Status.StartTimestamp != nil { - t.Errorf("(%d) Expected nil start timestamp, got %v!", i, build.Status.StartTimestamp) - } - if tc.startTimestamp != nil && build.Status.StartTimestamp == nil { - t.Errorf("(%d) nil start timestamp!", i) - } - if tc.startTimestamp != nil && !tc.startTimestamp.Before(*build.Status.StartTimestamp) && tc.startTimestamp.Time != build.Status.StartTimestamp.Time { - t.Errorf("(%d) Expected build start timestamp %v to be equal to or later than %v!", i, build.Status.StartTimestamp, tc.startTimestamp) - } - - if tc.completionTimestamp == nil && build.Status.CompletionTimestamp != nil { - t.Errorf("(%d) Expected nil completion timestamp, got %v!", i, build.Status.CompletionTimestamp) - } - if tc.completionTimestamp != nil && build.Status.CompletionTimestamp == nil { - t.Errorf("(%d) nil completion timestamp!", i) - } - if tc.completionTimestamp != nil && !tc.completionTimestamp.Before(*build.Status.CompletionTimestamp) && tc.completionTimestamp.Time != build.Status.CompletionTimestamp.Time { - t.Errorf("(%d) Expected build completion timestamp %v to be equal to or later than %v!", i, build.Status.CompletionTimestamp, tc.completionTimestamp) - } - } -} - -func TestHandleBuildPodDeletionOK(t *testing.T) { - updateWasCalled := false - // only not finished build (buildutil.IsBuildComplete) should be handled - build := mockBuild(buildapi.BuildPhaseRunning, buildapi.BuildOutput{}) - ctrl := mockBuildPodController(build, &customBuildUpdater{ - UpdateFunc: func(namespace string, build *buildapi.Build) error { - updateWasCalled = true - return nil - }, - }) - pod := mockPod(kapi.PodSucceeded, 0, nil) - - err := ctrl.HandleBuildPodDeletion(pod) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if !updateWasCalled { - t.Error("UpdateBuild was not called when it should have been!") - } -} - -func TestHandlePipelineBuildPodDeletionOK(t *testing.T) { - updateWasCalled := false - // only not finished build (buildutil.IsBuildComplete) should be handled - build := 
mockBuild(buildapi.BuildPhaseRunning, buildapi.BuildOutput{}) - build.Spec.Strategy.JenkinsPipelineStrategy = &buildapi.JenkinsPipelineBuildStrategy{} - ctrl := mockBuildPodController(build, &customBuildUpdater{ - UpdateFunc: func(namespace string, build *buildapi.Build) error { - updateWasCalled = true - return nil - }, - }) - pod := mockPod(kapi.PodSucceeded, 0, nil) - - err := ctrl.HandleBuildPodDeletion(pod) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if updateWasCalled { - t.Error("UpdateBuild called when it should not have been!") - } -} - -func TestHandleBuildPodDeletionOKFinishedBuild(t *testing.T) { - updateWasCalled := false - // finished build buildutil.IsBuildComplete should not be handled - build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) - ctrl := mockBuildPodController(build, &customBuildUpdater{ - UpdateFunc: func(namespace string, build *buildapi.Build) error { - updateWasCalled = true - return nil - }, - }) - pod := mockPod(kapi.PodSucceeded, 0, nil) - - err := ctrl.HandleBuildPodDeletion(pod) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if updateWasCalled { - t.Error("UpdateBuild was called when it should not!") - } -} - -func TestHandleBuildPodDeletionOKErroneousBuild(t *testing.T) { - updateWasCalled := false - // erroneous builds should not be handled - build := mockBuild(buildapi.BuildPhaseError, buildapi.BuildOutput{}) - ctrl := mockBuildPodController(build, &customBuildUpdater{ - UpdateFunc: func(namespace string, build *buildapi.Build) error { - updateWasCalled = true - return nil - }, - }) - pod := mockPod(kapi.PodSucceeded, 0, nil) - - err := ctrl.HandleBuildPodDeletion(pod) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if updateWasCalled { - t.Error("UpdateBuild was called when it should not!") - } -} - -type fakeIndexer struct { - *cache.FakeCustomStore -} - -func (*fakeIndexer) Index(indexName string, obj interface{}) ([]interface{}, error) { return nil, nil } -func (*fakeIndexer) IndexKeys(indexName, indexKey string) ([]string, error) { return nil, nil } -func (*fakeIndexer) ListIndexFuncValues(indexName string) []string { return nil } -func (*fakeIndexer) ByIndex(indexName, indexKey string) ([]interface{}, error) { return nil, nil } -func (*fakeIndexer) GetIndexers() cache.Indexers { return nil } -func (*fakeIndexer) AddIndexers(newIndexers cache.Indexers) error { return nil } - -func newErrIndexer(err error) cache.Indexer { - return &fakeIndexer{ - &cache.FakeCustomStore{ - GetByKeyFunc: func(key string) (interface{}, bool, error) { - return nil, true, err - }, - }, - } -} - -func TestHandleBuildPodDeletionBuildGetError(t *testing.T) { - ctrl := mockBuildPodController(nil, &customBuildUpdater{}) - ctrl.buildStore.Indexer = newErrIndexer(errors.New("random")) - pod := mockPod(kapi.PodSucceeded, 0, nil) - err := ctrl.HandleBuildPodDeletion(pod) - if err == nil { - t.Error("Expected random error, but got none!") - } - if err != nil && err.Error() != "random" { - t.Errorf("Expected random error, got: %v", err) - } -} - -func TestHandleBuildPodDeletionBuildNotExists(t *testing.T) { - updateWasCalled := false - ctrl := mockBuildPodController(nil, &customBuildUpdater{ - UpdateFunc: func(namespace string, build *buildapi.Build) error { - updateWasCalled = true - return nil - }, - }) - pod := mockPod(kapi.PodSucceeded, 0, nil) - - err := ctrl.HandleBuildPodDeletion(pod) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if updateWasCalled { - t.Error("UpdateBuild was called 
when it should not!") - } -} - -func TestHandleBuildPodDeletionBuildUpdateError(t *testing.T) { - build := mockBuild(buildapi.BuildPhaseRunning, buildapi.BuildOutput{}) - ctrl := mockBuildPodController(build, &customBuildUpdater{ - UpdateFunc: func(namespace string, build *buildapi.Build) error { - return errors.New("random") - }, - }) - pod := mockPod(kapi.PodSucceeded, 0, nil) - - err := ctrl.HandleBuildPodDeletion(pod) - if err == nil { - t.Error("Expected random error, but got none!") - } -} diff --git a/pkg/build/controller/common/util_test.go b/pkg/build/controller/common/util_test.go index f5b3f1499328..2901d03b1e7b 100644 --- a/pkg/build/controller/common/util_test.go +++ b/pkg/build/controller/common/util_test.go @@ -128,7 +128,7 @@ func TestHandleBuildPruning(t *testing.T) { buildLister := buildclient.NewOSClientBuildClient(osclient) buildConfigGetter := buildclient.NewOSClientBuildConfigClient(osclient) - buildDeleter := buildclient.NewBuildDeleter(osclient) + buildDeleter := buildclient.NewOSClientBuildClient(osclient) bcName := buildutil.ConfigNameForBuild(build) successfulStartingBuilds, err := buildutil.BuildConfigBuilds(buildLister, build.Namespace, bcName, func(build buildapi.Build) bool { return build.Status.Phase == buildapi.BuildPhaseComplete }) diff --git a/pkg/build/controller/factory/factory.go b/pkg/build/controller/factory/factory.go deleted file mode 100644 index 1270c01972d5..000000000000 --- a/pkg/build/controller/factory/factory.go +++ /dev/null @@ -1,459 +0,0 @@ -package factory - -import ( - "fmt" - "time" - - "github.com/golang/glog" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - kv1core "k8s.io/client-go/kubernetes/typed/core/v1" - kclientv1 "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/flowcontrol" - kapi "k8s.io/kubernetes/pkg/api" - kclientsetexternal "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - - builddefaults "github.com/openshift/origin/pkg/build/admission/defaults" - buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides" - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" - buildcontroller "github.com/openshift/origin/pkg/build/controller" - "github.com/openshift/origin/pkg/build/controller/policy" - strategy "github.com/openshift/origin/pkg/build/controller/strategy" - osclient "github.com/openshift/origin/pkg/client" - controller "github.com/openshift/origin/pkg/controller" - imageapi "github.com/openshift/origin/pkg/image/api" - errors "github.com/openshift/origin/pkg/util/errors" -) - -const ( - // We must avoid creating processing imagestream changes until the build config store has synced. - // If it hasn't synced, to avoid a hot loop, we'll wait this long between checks. - storeSyncedPollPeriod = 100 * time.Millisecond - maxRetries = 60 -) - -// limitedLogAndRetry stops retrying after maxTimeout, failing the build. 
-func limitedLogAndRetry(buildupdater buildclient.BuildUpdater, maxTimeout time.Duration) controller.RetryFunc { - return func(obj interface{}, err error, retries controller.Retry) bool { - isFatal := strategy.IsFatal(err) - build := obj.(*buildapi.Build) - if !isFatal && time.Since(retries.StartTimestamp.Time) < maxTimeout { - glog.V(4).Infof("Retrying Build %s/%s with error: %v", build.Namespace, build.Name, err) - return true - } - build.Status.Phase = buildapi.BuildPhaseFailed - if !isFatal { - build.Status.Reason = buildapi.StatusReasonExceededRetryTimeout - build.Status.Message = buildapi.StatusMessageExceededRetryTimeout - } - build.Status.Message = errors.ErrorToSentence(err) - now := metav1.Now() - build.Status.CompletionTimestamp = &now - glog.V(3).Infof("Giving up retrying Build %s/%s: %v", build.Namespace, build.Name, err) - utilruntime.HandleError(err) - if err := buildupdater.Update(build.Namespace, build); err != nil { - // retry update, but only on error other than NotFound - return !kerrors.IsNotFound(err) - } - return false - } -} - -// BuildControllerFactory constructs BuildController objects -type BuildControllerFactory struct { - OSClient osclient.Interface - KubeClient kclientset.Interface - ExternalKubeClient kclientsetexternal.Interface - BuildUpdater buildclient.BuildUpdater - BuildLister buildclient.BuildLister - BuildConfigGetter buildclient.BuildConfigGetter - BuildDeleter buildclient.BuildDeleter - DockerBuildStrategy *strategy.DockerBuildStrategy - SourceBuildStrategy *strategy.SourceBuildStrategy - CustomBuildStrategy *strategy.CustomBuildStrategy - BuildDefaults builddefaults.BuildDefaults - BuildOverrides buildoverrides.BuildOverrides - - // Stop may be set to allow controllers created by this factory to be terminated. 
- Stop <-chan struct{} -} - -// Create constructs a BuildController -func (factory *BuildControllerFactory) Create() controller.RunnableController { - queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc) - cache.NewReflector(newBuildLW(factory.OSClient), &buildapi.Build{}, queue, 2*time.Minute).RunUntil(factory.Stop) - - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartRecordingToSink(&kv1core.EventSinkImpl{Interface: kv1core.New(factory.ExternalKubeClient.CoreV1().RESTClient()).Events("")}) - - client := ControllerClient{factory.KubeClient, factory.OSClient} - buildController := &buildcontroller.BuildController{ - BuildUpdater: factory.BuildUpdater, - BuildLister: factory.BuildLister, - BuildConfigGetter: factory.BuildConfigGetter, - BuildDeleter: factory.BuildDeleter, - ImageStreamClient: client, - PodManager: client, - RunPolicies: policy.GetAllRunPolicies(factory.BuildLister, factory.BuildUpdater), - BuildStrategy: &typeBasedFactoryStrategy{ - DockerBuildStrategy: factory.DockerBuildStrategy, - SourceBuildStrategy: factory.SourceBuildStrategy, - CustomBuildStrategy: factory.CustomBuildStrategy, - }, - Recorder: eventBroadcaster.NewRecorder(kapi.Scheme, kclientv1.EventSource{Component: "build-controller"}), - BuildDefaults: factory.BuildDefaults, - BuildOverrides: factory.BuildOverrides, - } - - return &controller.RetryController{ - Queue: queue, - RetryManager: controller.NewQueueRetryManager( - queue, - cache.MetaNamespaceKeyFunc, - limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute), - flowcontrol.NewTokenBucketRateLimiter(1, 10)), - Handle: func(obj interface{}) error { - build := obj.(*buildapi.Build) - err := buildController.HandleBuild(build) - if err != nil { - // Update the build status message only if it changed. - if msg := errors.ErrorToSentence(err); build.Status.Message != msg { - // Set default Reason. 
- if len(build.Status.Reason) == 0 { - build.Status.Reason = buildapi.StatusReasonError - } - build.Status.Message = msg - if err := buildController.BuildUpdater.Update(build.Namespace, build); err != nil { - glog.V(2).Infof("Failed to update status message of Build %s/%s: %v", build.Namespace, build.Name, err) - } - buildController.Recorder.Eventf(build, kapi.EventTypeWarning, "HandleBuildError", "Build has error: %v", err) - } - } - return err - }, - } -} - -// CreateDeleteController constructs a BuildDeleteController -func (factory *BuildControllerFactory) CreateDeleteController() controller.RunnableController { - client := ControllerClient{factory.KubeClient, factory.OSClient} - queue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, keyListerGetter{}) - cache.NewReflector(&buildDeleteLW{client, queue}, &buildapi.Build{}, queue, 5*time.Minute).RunUntil(factory.Stop) - - buildDeleteController := &buildcontroller.BuildDeleteController{ - PodManager: client, - } - - return &controller.RetryController{ - Queue: queue, - RetryManager: controller.NewQueueRetryManager( - queue, - queue.KeyOf, - controller.RetryNever, - flowcontrol.NewTokenBucketRateLimiter(1, 10)), - Handle: func(obj interface{}) error { - deltas := obj.(cache.Deltas) - for _, delta := range deltas { - if delta.Type == cache.Deleted { - return buildDeleteController.HandleBuildDeletion(delta.Object.(*buildapi.Build)) - } - } - return nil - }, - } -} - -// retryFunc returns a function to retry a controller event -func retryFunc(kind string, isFatal func(err error) bool) controller.RetryFunc { - return func(obj interface{}, err error, retries controller.Retry) bool { - name, keyErr := cache.MetaNamespaceKeyFunc(obj) - if keyErr != nil { - name = "Unknown" - } - if isFatal != nil && isFatal(err) { - glog.V(3).Infof("Will not retry fatal error for %s %s: %v", kind, name, err) - utilruntime.HandleError(err) - return false - } - if retries.Count > maxRetries { - glog.V(3).Infof("Giving up retrying %s %s: %v", kind, name, err) - utilruntime.HandleError(err) - return false - } - glog.V(4).Infof("Retrying %s %s: %v", kind, name, err) - return true - } -} - -// keyListerGetter is a dummy implementation of a KeyListerGetter -// which always returns a fake object and true for gets, and -// returns no items for list. This forces the DeltaFIFO queue -// to always queue delete events it receives from etcd. Our -// client will properly handle duplicated events and this is more -// efficient than maintaining a local cache of all the build pods -// so the DeltaFIFO can perform a proper diff. -type keyListerGetter struct { - client osclient.Interface -} - -// ListKeys is a dummy implementation of a KeyListerGetter interface returning -// empty string array; used only to force DeltaFIFO to always queue delete events. -func (keyListerGetter) ListKeys() []string { - return []string{} -} - -// GetByKey is a dummy implementation of a KeyListerGetter interface returning -// always "", true, nil; used only to force DeltaFIFO to always queue delete events. 
-func (keyListerGetter) GetByKey(key string) (interface{}, bool, error) { - return "", true, nil -} - -type BuildConfigControllerFactory struct { - Client osclient.Interface - KubeClient kclientset.Interface - ExternalKubeClient kclientsetexternal.Interface - BuildConfigInstantiator buildclient.BuildConfigInstantiator - BuildConfigGetter buildclient.BuildConfigGetter - BuildLister buildclient.BuildLister - BuildDeleter buildclient.BuildDeleter - // Stop may be set to allow controllers created by this factory to be terminated. - Stop <-chan struct{} -} - -// Create creates a new ConfigChangeController which is used to trigger builds on creation -func (factory *BuildConfigControllerFactory) Create() controller.RunnableController { - queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc) - cache.NewReflector(newBuildConfigLW(factory.Client), &buildapi.BuildConfig{}, queue, 2*time.Minute).RunUntil(factory.Stop) - - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartRecordingToSink(&kv1core.EventSinkImpl{Interface: kv1core.New(factory.ExternalKubeClient.CoreV1().RESTClient()).Events("")}) - - bcController := &buildcontroller.BuildConfigController{ - BuildConfigInstantiator: factory.BuildConfigInstantiator, - BuildConfigGetter: factory.BuildConfigGetter, - BuildLister: factory.BuildLister, - BuildDeleter: factory.BuildDeleter, - Recorder: eventBroadcaster.NewRecorder(kapi.Scheme, kclientv1.EventSource{Component: "build-config-controller"}), - } - - return &controller.RetryController{ - Queue: queue, - RetryManager: controller.NewQueueRetryManager( - queue, - cache.MetaNamespaceKeyFunc, - retryFunc("BuildConfig", buildcontroller.IsFatal), - flowcontrol.NewTokenBucketRateLimiter(1, 10)), - Handle: func(obj interface{}) error { - bc := obj.(*buildapi.BuildConfig) - return bcController.HandleBuildConfig(bc) - }, - } -} - -// podEnumerator allows a cache.Poller to enumerate items in an api.PodList -type podEnumerator struct { - *kapi.PodList -} - -// Len returns the number of items in the pod list. -func (pe *podEnumerator) Len() int { - if pe.PodList == nil { - return 0 - } - return len(pe.Items) -} - -// Get returns the item (and ID) with the particular index. -func (pe *podEnumerator) Get(index int) interface{} { - return &pe.Items[index] -} - -type typeBasedFactoryStrategy struct { - DockerBuildStrategy *strategy.DockerBuildStrategy - SourceBuildStrategy *strategy.SourceBuildStrategy - CustomBuildStrategy *strategy.CustomBuildStrategy -} - -func (f *typeBasedFactoryStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { - var pod *kapi.Pod - var err error - switch { - case build.Spec.Strategy.DockerStrategy != nil: - pod, err = f.DockerBuildStrategy.CreateBuildPod(build) - case build.Spec.Strategy.SourceStrategy != nil: - pod, err = f.SourceBuildStrategy.CreateBuildPod(build) - case build.Spec.Strategy.CustomStrategy != nil: - pod, err = f.CustomBuildStrategy.CreateBuildPod(build) - case build.Spec.Strategy.JenkinsPipelineStrategy != nil: - return nil, nil - default: - return nil, fmt.Errorf("no supported build strategy defined for Build %s/%s", build.Namespace, build.Name) - } - - if pod != nil { - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - pod.Annotations[buildapi.BuildAnnotation] = build.Name - } - return pod, err -} - -// podLW is a ListWatcher implementation for Pods. -type podLW struct { - client kclientset.Interface -} - -// List lists all Pods that have a build label. 
-func (lw *podLW) List(options metav1.ListOptions) (runtime.Object, error) { - return listPods(lw.client) -} - -func listPods(client kclientset.Interface) (*kapi.PodList, error) { - // get builds with new label - sel, err := labels.Parse(buildapi.BuildLabel) - if err != nil { - return nil, err - } - listNew, err := client.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{LabelSelector: sel.String()}) - if err != nil { - return nil, err - } - return listNew, nil -} - -// Watch watches all Pods that have a build label. -func (lw *podLW) Watch(options metav1.ListOptions) (watch.Interface, error) { - // FIXME: since we cannot have OR on label name we'll just get builds with new label - sel, err := labels.Parse(buildapi.BuildLabel) - if err != nil { - return nil, err - } - opts := metav1.ListOptions{ - LabelSelector: sel.String(), - ResourceVersion: options.ResourceVersion, - } - return lw.client.Core().Pods(metav1.NamespaceAll).Watch(opts) -} - -func newBuildLW(client osclient.Interface) cache.ListerWatcher { - return &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return client.Builds(metav1.NamespaceAll).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.Builds(metav1.NamespaceAll).Watch(options) - }, - } -} - -// buildDeleteLW is a ListWatcher implementation that watches for builds being deleted -type buildDeleteLW struct { - ControllerClient - store cache.Store -} - -// List returns an empty list but adds delete events to the store for all Builds that have been deleted but still have pods. -func (lw *buildDeleteLW) List(options metav1.ListOptions) (runtime.Object, error) { - glog.V(5).Info("Checking for deleted builds") - podList, err := listPods(lw.KubeClient) - if err != nil { - glog.V(4).Infof("Failed to find any pods due to error %v", err) - return nil, err - } - - for _, pod := range podList.Items { - buildName := buildapi.GetBuildName(&pod) - if len(buildName) == 0 { - continue - } - glog.V(5).Infof("Found build pod %s/%s", pod.Namespace, pod.Name) - - build, err := lw.Client.Builds(pod.Namespace).Get(buildName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - glog.V(4).Infof("Error getting build for pod %s/%s: %v", pod.Namespace, pod.Name, err) - return nil, err - } - if err != nil && kerrors.IsNotFound(err) { - build = nil - } - if build == nil { - deletedBuild := &buildapi.Build{ - ObjectMeta: metav1.ObjectMeta{ - Name: buildName, - Namespace: pod.Namespace, - }, - } - glog.V(4).Infof("No build found for build pod %s/%s, deleting pod", pod.Namespace, pod.Name) - err := lw.store.Delete(deletedBuild) - if err != nil { - glog.V(4).Infof("Error queuing delete event: %v", err) - } - } else { - glog.V(5).Infof("Found build %s/%s for pod %s", build.Namespace, build.Name, pod.Name) - } - } - return &buildapi.BuildList{}, nil -} - -// Watch watches all Builds. 
-func (lw *buildDeleteLW) Watch(options metav1.ListOptions) (watch.Interface, error) { - return lw.Client.Builds(metav1.NamespaceAll).Watch(options) -} - -func newBuildConfigLW(client osclient.Interface) cache.ListerWatcher { - return &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return client.BuildConfigs(metav1.NamespaceAll).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.BuildConfigs(metav1.NamespaceAll).Watch(options) - }, - } -} - -func newImageStreamLW(client osclient.Interface) cache.ListerWatcher { - return &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return client.ImageStreams(metav1.NamespaceAll).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.ImageStreams(metav1.NamespaceAll).Watch(options) - }, - } -} - -// ControllerClient implements the common interfaces needed for build controllers -type ControllerClient struct { - KubeClient kclientset.Interface - Client osclient.Interface -} - -// CreatePod creates a pod using the Kubernetes client. -func (c ControllerClient) CreatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) { - return c.KubeClient.Core().Pods(namespace).Create(pod) -} - -// DeletePod destroys a pod using the Kubernetes client. -func (c ControllerClient) DeletePod(namespace string, pod *kapi.Pod) error { - return c.KubeClient.Core().Pods(namespace).Delete(pod.Name, nil) -} - -// GetPod gets a pod using the Kubernetes client. -func (c ControllerClient) GetPod(namespace, name string) (*kapi.Pod, error) { - return c.KubeClient.Core().Pods(namespace).Get(name, metav1.GetOptions{}) -} - -// GetImageStream retrieves an image repository by namespace and name -func (c ControllerClient) GetImageStream(namespace, name string) (*imageapi.ImageStream, error) { - return c.Client.ImageStreams(namespace).Get(name, metav1.GetOptions{}) -} diff --git a/pkg/build/controller/factory/factory_test.go b/pkg/build/controller/factory/factory_test.go deleted file mode 100644 index 7ca244459512..000000000000 --- a/pkg/build/controller/factory/factory_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package factory - -import ( - "errors" - "fmt" - "strings" - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kapi "k8s.io/kubernetes/pkg/api" - - buildapi "github.com/openshift/origin/pkg/build/api" - controller "github.com/openshift/origin/pkg/controller" -) - -type buildUpdater struct { - Build *buildapi.Build -} - -func (b *buildUpdater) Update(namespace string, build *buildapi.Build) error { - b.Build = build - return nil -} - -func TestLimitedLogAndRetryFinish(t *testing.T) { - updater := &buildUpdater{} - err := errors.New("funky error") - - now := metav1.Now() - retry := controller.Retry{ - Count: 0, - StartTimestamp: metav1.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-31, now.Second(), now.Nanosecond(), now.Location()), - } - if limitedLogAndRetry(updater, 30*time.Minute)(&buildapi.Build{Status: buildapi.BuildStatus{Phase: buildapi.BuildPhaseNew}}, err, retry) { - t.Error("Expected no more retries after reaching timeout!") - } - if updater.Build == nil { - t.Fatal("BuildUpdater wasn't called!") - } - if updater.Build.Status.Phase != buildapi.BuildPhaseFailed { - t.Errorf("Expected status %s, got %s!", buildapi.BuildPhaseFailed, updater.Build.Status.Phase) - } - if !strings.Contains(updater.Build.Status.Message, "Funky error.") { - 
t.Errorf("Expected message to contain %v, got %s!", err.Error(), updater.Build.Status.Message) - } - if updater.Build.Status.CompletionTimestamp == nil { - t.Error("Expected CompletionTimestamp to be set!") - } -} - -func TestLimitedLogAndRetryProcessing(t *testing.T) { - updater := &buildUpdater{} - err := errors.New("funky error") - - now := metav1.Now() - retry := controller.Retry{ - Count: 0, - StartTimestamp: metav1.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-10, now.Second(), now.Nanosecond(), now.Location()), - } - if !limitedLogAndRetry(updater, 30*time.Minute)(&buildapi.Build{Status: buildapi.BuildStatus{Phase: buildapi.BuildPhaseNew}}, err, retry) { - t.Error("Expected more retries!") - } - if updater.Build != nil { - t.Fatal("BuildUpdater shouldn't be called!") - } -} - -func TestControllerRetryFunc(t *testing.T) { - obj := &kapi.Pod{} - obj.Name = "testpod" - obj.Namespace = "testNS" - - testErr := fmt.Errorf("test error") - tests := []struct { - name string - retryCount int - isFatal func(err error) bool - err error - expect bool - }{ - { - name: "maxRetries-1 retries", - retryCount: maxRetries - 1, - err: testErr, - expect: true, - }, - { - name: "maxRetries+1 retries", - retryCount: maxRetries + 1, - err: testErr, - expect: false, - }, - { - name: "isFatal returns true", - retryCount: 0, - err: testErr, - isFatal: func(err error) bool { - if err != testErr { - t.Errorf("Unexpected error: %v", err) - } - return true - }, - expect: false, - }, - { - name: "isFatal returns false", - retryCount: 0, - err: testErr, - isFatal: func(err error) bool { - if err != testErr { - t.Errorf("Unexpected error: %v", err) - } - return false - }, - expect: true, - }, - } - - for _, tc := range tests { - f := retryFunc("test kind", tc.isFatal) - result := f(obj, tc.err, controller.Retry{Count: tc.retryCount}) - if result != tc.expect { - t.Errorf("%s: unexpected result. Expected: %v. Got: %v", tc.name, tc.expect, result) - } - } -} diff --git a/pkg/build/controller/policy/parallel.go b/pkg/build/controller/policy/parallel.go index 5be515d9752e..4d8127a616da 100644 --- a/pkg/build/controller/policy/parallel.go +++ b/pkg/build/controller/policy/parallel.go @@ -30,3 +30,8 @@ func (s *ParallelPolicy) IsRunnable(build *buildapi.Build) (bool, error) { func (s *ParallelPolicy) OnComplete(build *buildapi.Build) error { return handleComplete(s.BuildLister, s.BuildUpdater, build) } + +// Handles returns true for the build run parallel policy +func (s *ParallelPolicy) Handles(policy buildapi.BuildRunPolicy) bool { + return policy == buildapi.BuildRunPolicyParallel +} diff --git a/pkg/build/controller/policy/policy.go b/pkg/build/controller/policy/policy.go index 5b70ee86ae15..109d0ffcb3b9 100644 --- a/pkg/build/controller/policy/policy.go +++ b/pkg/build/controller/policy/policy.go @@ -23,6 +23,9 @@ type RunPolicy interface { // OnComplete allows policy to execute action when the given build just // completed. OnComplete(*buildapi.Build) error + + // Handles returns true if the run policy handles a specific policy + Handles(buildapi.BuildRunPolicy) bool } // GetAllRunPolicies returns a set of all run policies. @@ -36,23 +39,11 @@ func GetAllRunPolicies(lister buildclient.BuildLister, updater buildclient.Build // ForBuild picks the appropriate run policy for the given build. 
func ForBuild(build *buildapi.Build, policies []RunPolicy) RunPolicy { + buildPolicy := buildutil.BuildRunPolicy(build) for _, s := range policies { - switch buildutil.BuildRunPolicy(build) { - case buildapi.BuildRunPolicyParallel: - if _, ok := s.(*ParallelPolicy); ok { - glog.V(5).Infof("Using %T run policy for build %s/%s", s, build.Namespace, build.Name) - return s - } - case buildapi.BuildRunPolicySerial: - if _, ok := s.(*SerialPolicy); ok { - glog.V(5).Infof("Using %T run policy for build %s/%s", s, build.Namespace, build.Name) - return s - } - case buildapi.BuildRunPolicySerialLatestOnly: - if _, ok := s.(*SerialLatestOnlyPolicy); ok { - glog.V(5).Infof("Using %T run policy for build %s/%s", s, build.Namespace, build.Name) - return s - } + if s.Handles(buildPolicy) { + glog.V(5).Infof("Using %T run policy for build %s/%s", s, build.Namespace, build.Name) + return s } } return nil diff --git a/pkg/build/controller/policy/serial.go b/pkg/build/controller/policy/serial.go index d305f2c94808..3e694f412a68 100644 --- a/pkg/build/controller/policy/serial.go +++ b/pkg/build/controller/policy/serial.go @@ -33,3 +33,8 @@ func (s *SerialPolicy) IsRunnable(build *buildapi.Build) (bool, error) { func (s *SerialPolicy) OnComplete(build *buildapi.Build) error { return handleComplete(s.BuildLister, s.BuildUpdater, build) } + +// Handles returns true for the build run serial policy +func (s *SerialPolicy) Handles(policy buildapi.BuildRunPolicy) bool { + return policy == buildapi.BuildRunPolicySerial +} diff --git a/pkg/build/controller/policy/serial_latest_only.go b/pkg/build/controller/policy/serial_latest_only.go index 8877fbcc0479..0ae74cff467e 100644 --- a/pkg/build/controller/policy/serial_latest_only.go +++ b/pkg/build/controller/policy/serial_latest_only.go @@ -49,6 +49,11 @@ func (s *SerialLatestOnlyPolicy) OnComplete(build *buildapi.Build) error { return handleComplete(s.BuildLister, s.BuildUpdater, build) } +// Handles returns true for the build run serial latest only policy +func (s *SerialLatestOnlyPolicy) Handles(policy buildapi.BuildRunPolicy) bool { + return policy == buildapi.BuildRunPolicySerialLatestOnly +} + // cancelPreviousBuilds cancels all queued builds that have the build sequence number // lower than the given build. It retries the cancellation in case of conflict. 
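The policy hunks above replace ForBuild's type switch with a small predicate: every RunPolicy now answers Handles(buildapi.BuildRunPolicy), and ForBuild simply returns the first policy that claims the build's run policy. A minimal standalone sketch of that selection pattern, using stand-in types rather than the origin packages:

package main

import "fmt"

// BuildRunPolicy stands in for the string-typed run policy on a build.
type BuildRunPolicy string

const (
	Parallel BuildRunPolicy = "Parallel"
	Serial   BuildRunPolicy = "Serial"
)

// RunPolicy keeps only the part relevant here: each policy reports what it handles.
type RunPolicy interface {
	Handles(BuildRunPolicy) bool
}

type ParallelPolicy struct{}

func (ParallelPolicy) Handles(p BuildRunPolicy) bool { return p == Parallel }

type SerialPolicy struct{}

func (SerialPolicy) Handles(p BuildRunPolicy) bool { return p == Serial }

// forPolicy mirrors the simplified ForBuild loop: the first policy that handles wins.
func forPolicy(requested BuildRunPolicy, policies []RunPolicy) RunPolicy {
	for _, s := range policies {
		if s.Handles(requested) {
			return s
		}
	}
	return nil
}

func main() {
	policies := []RunPolicy{ParallelPolicy{}, SerialPolicy{}}
	fmt.Printf("%T\n", forPolicy(Serial, policies)) // prints main.SerialPolicy
}

With this shape, adding a new run policy only requires implementing Handles (plus the existing IsRunnable/OnComplete), with no further change to ForBuild.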
func (s *SerialLatestOnlyPolicy) cancelPreviousBuilds(build *buildapi.Build) []error { diff --git a/pkg/build/controller/strategy/custom.go b/pkg/build/controller/strategy/custom.go index 6474b9b176f5..53e45677d9be 100644 --- a/pkg/build/controller/strategy/custom.go +++ b/pkg/build/controller/strategy/custom.go @@ -113,6 +113,7 @@ func (bs *CustomBuildStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, setupDockerSocket(pod) setupDockerSecrets(pod, build.Spec.Output.PushSecret, strategy.PullSecret, build.Spec.Source.Images) } + setOwnerReference(pod, build) setupSourceSecrets(pod, build.Spec.Source.SourceSecret) setupSecrets(pod, build.Spec.Source.Secrets) setupAdditionalSecrets(pod, build.Spec.Strategy.CustomStrategy.Secrets) diff --git a/pkg/build/controller/strategy/docker.go b/pkg/build/controller/strategy/docker.go index 1055e570240e..754d614058eb 100644 --- a/pkg/build/controller/strategy/docker.go +++ b/pkg/build/controller/strategy/docker.go @@ -77,6 +77,7 @@ func (bs *DockerBuildStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, pod.Spec.Containers[0].StdinOnce = true } + setOwnerReference(pod, build) setupDockerSocket(pod) setupDockerSecrets(pod, build.Spec.Output.PushSecret, strategy.PullSecret, build.Spec.Source.Images) setupSourceSecrets(pod, build.Spec.Source.SourceSecret) diff --git a/pkg/build/controller/strategy/sti.go b/pkg/build/controller/strategy/sti.go index 88f67a50b0a7..4b7fbf65b6db 100644 --- a/pkg/build/controller/strategy/sti.go +++ b/pkg/build/controller/strategy/sti.go @@ -100,6 +100,7 @@ func (bs *SourceBuildStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, pod.Spec.Containers[0].StdinOnce = true } + setOwnerReference(pod, build) setupDockerSocket(pod) setupDockerSecrets(pod, build.Spec.Output.PushSecret, strategy.PullSecret, build.Spec.Source.Images) setupSourceSecrets(pod, build.Spec.Source.SourceSecret) diff --git a/pkg/build/controller/strategy/util.go b/pkg/build/controller/strategy/util.go index c5b70da22619..8317ff3d27c5 100644 --- a/pkg/build/controller/strategy/util.go +++ b/pkg/build/controller/strategy/util.go @@ -5,14 +5,17 @@ import ( "path/filepath" "strconv" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kvalidation "k8s.io/apimachinery/pkg/util/validation" + kapi "k8s.io/kubernetes/pkg/api" + "github.com/golang/glog" buildapi "github.com/openshift/origin/pkg/build/api" + buildapiv1 "github.com/openshift/origin/pkg/build/api/v1" "github.com/openshift/origin/pkg/build/builder/cmd/dockercfg" imageapi "github.com/openshift/origin/pkg/image/api" "github.com/openshift/origin/pkg/util/namer" "github.com/openshift/origin/pkg/version" - kvalidation "k8s.io/apimachinery/pkg/util/validation" - kapi "k8s.io/kubernetes/pkg/api" ) const ( @@ -25,6 +28,12 @@ const ( sourceSecretMountPath = "/var/run/secrets/openshift.io/source" ) +var ( + // BuildControllerRefKind contains the schema.GroupVersionKind for builds. + // This is used in the ownerRef of builder pods. + BuildControllerRefKind = buildapiv1.SchemeGroupVersion.WithKind("Build") +) + // FatalError is an error which can't be retried. 
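The strategy hunks above call a new setOwnerReference helper (its body appears in the util.go hunk that follows) so every builder pod carries a controller owner reference pointing at its Build, built from the Build GroupVersionKind stored in BuildControllerRefKind. A hedged, standalone sketch of constructing such a reference with the apimachinery types; the name, UID, and the spelled-out group/version below are illustrative values only:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// Example values; in the controller these come from the owning Build object.
	buildName := "example-build-1"
	buildUID := types.UID("11111111-2222-3333-4444-555555555555")

	// Written out literally here for illustration; the diff derives it from
	// buildapiv1.SchemeGroupVersion.WithKind("Build").
	gvk := schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "Build"}

	controller := true
	ref := metav1.OwnerReference{
		APIVersion: gvk.GroupVersion().String(),
		Kind:       gvk.Kind,
		Name:       buildName,
		UID:        buildUID,
		Controller: &controller,
	}
	// Placing this reference in the pod's ObjectMeta.OwnerReferences ties the
	// builder pod's lifecycle to its Build for garbage collection and adoption.
	fmt.Printf("%+v\n", ref)
}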
type FatalError struct { // Reason the fatal error occurred @@ -216,3 +225,16 @@ func getContainerVerbosity(containerEnv []kapi.EnvVar) (verbosity string) { func getPodLabels(build *buildapi.Build) map[string]string { return map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)} } + +func setOwnerReference(pod *kapi.Pod, build *buildapi.Build) { + t := true + pod.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: BuildControllerRefKind.GroupVersion().String(), + Kind: BuildControllerRefKind.Kind, + Name: build.Name, + UID: build.UID, + Controller: &t, + }, + } +} diff --git a/pkg/build/registry/build/strategy.go b/pkg/build/registry/build/strategy.go index 22005d975f2d..67d482f1f7a4 100644 --- a/pkg/build/registry/build/strategy.go +++ b/pkg/build/registry/build/strategy.go @@ -51,7 +51,16 @@ func (strategy) PrepareForCreate(ctx apirequest.Context, obj runtime.Object) { // PrepareForUpdate clears fields that are not allowed to be set by end users on update. func (strategy) PrepareForUpdate(ctx apirequest.Context, obj, old runtime.Object) { - _ = obj.(*api.Build) + newBuild := obj.(*api.Build) + oldBuild := old.(*api.Build) + // If the build is already in a failed state, do not allow an update + // of the reason and message. This is to prevent the build controller from + // overwriting the reason and message that was set by the builder pod + // when it updated the build's details. + if oldBuild.Status.Phase == api.BuildPhaseFailed { + newBuild.Status.Reason = oldBuild.Status.Reason + newBuild.Status.Message = oldBuild.Status.Message + } } // Canonicalize normalizes the object after validation. diff --git a/pkg/build/util/util.go b/pkg/build/util/util.go index e2c2007d5638..da5907feebc1 100644 --- a/pkg/build/util/util.go +++ b/pkg/build/util/util.go @@ -50,7 +50,18 @@ func GetInputReference(strategy buildapi.BuildStrategy) *kapi.ObjectReference { // IsBuildComplete returns whether the provided build is complete or not func IsBuildComplete(build *buildapi.Build) bool { - return build.Status.Phase != buildapi.BuildPhaseRunning && build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew + return IsTerminalPhase(build.Status.Phase) +} + +// IsTerminalPhase returns true if the provided phase is terminal +func IsTerminalPhase(phase buildapi.BuildPhase) bool { + switch phase { + case buildapi.BuildPhaseNew, + buildapi.BuildPhasePending, + buildapi.BuildPhaseRunning: + return false + } + return true } // IsPaused returns true if the provided BuildConfig is paused and cannot be used to create a new Build diff --git a/pkg/client/builds.go b/pkg/client/builds.go index e0a934d8ec15..6d712fdd09bd 100644 --- a/pkg/client/builds.go +++ b/pkg/client/builds.go @@ -2,6 +2,7 @@ package client import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" kapi "k8s.io/kubernetes/pkg/api" @@ -23,6 +24,7 @@ type BuildInterface interface { Watch(opts metav1.ListOptions) (watch.Interface, error) Clone(request *buildapi.BuildRequest) (*buildapi.Build, error) UpdateDetails(build *buildapi.Build) (*buildapi.Build, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*buildapi.Build, error) } // builds implements BuildsNamespacer interface @@ -102,3 +104,10 @@ func (c *builds) UpdateDetails(build *buildapi.Build) (*buildapi.Build, error) { err := c.r.Put().Namespace(c.ns).Resource("builds").Name(build.Name).SubResource("details").Body(build).Do().Into(result) 
return result, err } + +// Patch takes the partial representation of a build and updates it. +func (c *builds) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*buildapi.Build, error) { + result := &buildapi.Build{} + err := c.r.Patch(types.StrategicMergePatchType).Namespace(c.ns).Resource("builds").SubResource(subresources...).Name(name).Body(data).Do().Into(result) + return result, err +} diff --git a/pkg/client/cache/buildconfig.go b/pkg/client/cache/buildconfig.go index 30fe17a9079e..edb35c0fd59c 100644 --- a/pkg/client/cache/buildconfig.go +++ b/pkg/client/cache/buildconfig.go @@ -11,6 +11,7 @@ import ( // StoreToBuildConfigLister gives a store List and Exists methods. The store must contain only buildconfigs. type StoreToBuildConfigLister interface { + BuildConfigs(namespace string) storeBuildConfigsNamespacer List() ([]*buildapi.BuildConfig, error) GetConfigsForImageStreamTrigger(namespace, name string) ([]*buildapi.BuildConfig, error) } diff --git a/pkg/client/testclient/fake_builds.go b/pkg/client/testclient/fake_builds.go index d1b87228ad8e..441823e0807d 100644 --- a/pkg/client/testclient/fake_builds.go +++ b/pkg/client/testclient/fake_builds.go @@ -3,6 +3,7 @@ package testclient import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" clientgotesting "k8s.io/client-go/testing" @@ -54,6 +55,15 @@ func (c *FakeBuilds) Update(inObj *buildapi.Build) (*buildapi.Build, error) { return obj.(*buildapi.Build), err } +func (c *FakeBuilds) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*buildapi.Build, error) { + obj, err := c.Fake.Invokes(clientgotesting.NewPatchSubresourceAction(buildsResource, c.Namespace, name, data, subresources...), &buildapi.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildapi.Build), err +} + func (c *FakeBuilds) Delete(name string) error { _, err := c.Fake.Invokes(clientgotesting.NewDeleteAction(buildsResource, c.Namespace, name), &buildapi.Build{}) return err diff --git a/pkg/cmd/admin/prune/builds.go b/pkg/cmd/admin/prune/builds.go index e41a0c7c0cc8..63678bb4a119 100644 --- a/pkg/cmd/admin/prune/builds.go +++ b/pkg/cmd/admin/prune/builds.go @@ -157,7 +157,7 @@ func (o PruneBuildsOptions) Run() error { buildDeleter := &describingBuildDeleter{w: w} if o.Confirm { - buildDeleter.delegate = buildclient.NewBuildDeleter(o.OSClient) + buildDeleter.delegate = buildclient.NewOSClientBuildClient(o.OSClient) } else { fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made. 
Add --confirm to remove builds") } diff --git a/pkg/cmd/cli/cmd/logs_test.go b/pkg/cmd/cli/cmd/logs_test.go index 51741d3221ec..f16e754b5edc 100644 --- a/pkg/cmd/cli/cmd/logs_test.go +++ b/pkg/cmd/cli/cmd/logs_test.go @@ -10,6 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" kcmd "k8s.io/kubernetes/pkg/kubectl/cmd" @@ -74,6 +75,10 @@ func (f *fakeBuildClient) UpdateDetails(build *buildapi.Build) (*buildapi.Build, return nil, nil } +func (f *fakeBuildClient) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*buildapi.Build, error) { + return nil, nil +} + type fakeNamespacer struct { client buildclient.BuildInterface } diff --git a/pkg/cmd/server/bootstrappolicy/controller_policy.go b/pkg/cmd/server/bootstrappolicy/controller_policy.go index debcb5751021..24c772a56862 100644 --- a/pkg/cmd/server/bootstrappolicy/controller_policy.go +++ b/pkg/cmd/server/bootstrappolicy/controller_policy.go @@ -49,25 +49,13 @@ func init() { addControllerRole(rbac.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + InfraBuildControllerServiceAccountName}, Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update", "delete").Groups(buildGroup, legacyBuildGroup).Resources("builds").RuleOrDie(), + rbac.NewRule("get", "list", "watch", "patch", "update", "delete").Groups(buildGroup, legacyBuildGroup).Resources("builds").RuleOrDie(), rbac.NewRule("get").Groups(buildGroup, legacyBuildGroup).Resources("buildconfigs").RuleOrDie(), rbac.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/optimizeddocker", "builds/docker", "builds/source", "builds/custom", "builds/jenkinspipeline").RuleOrDie(), - rbac.NewRule("get").Groups(imageGroup, legacyImageGroup).Resources("imagestreams").RuleOrDie(), + rbac.NewRule("get", "list").Groups(imageGroup, legacyImageGroup).Resources("imagestreams").RuleOrDie(), + rbac.NewRule("get", "list").Groups(kapiGroup).Resources("secrets").RuleOrDie(), rbac.NewRule("get", "list", "create", "delete").Groups(kapiGroup).Resources("pods").RuleOrDie(), - eventsRule(), - }, - }) - - // build-pod-controller - addControllerRole(rbac.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + InfraBuildPodControllerServiceAccountName}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("create", "get", "list", "watch", "update", "delete").Groups(buildGroup, legacyBuildGroup).Resources("builds").RuleOrDie(), - rbac.NewRule("get", "list", "create", "delete").Groups(kapiGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("get").Groups(kapiGroup).Resources("secrets").RuleOrDie(), - rbac.NewRule("get").Groups(buildGroup, legacyBuildGroup).Resources("buildconfigs").RuleOrDie(), - // Needed for strategyrestriction admission - rbac.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("builds/optimizeddocker", "builds/docker", "builds/source", "builds/custom", "builds/jenkinspipeline").RuleOrDie(), + rbac.NewRule("get").Groups(kapiGroup).Resources("namespaces").RuleOrDie(), eventsRule(), }, }) diff --git a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go index 59384ac0f2aa..36228af913d0 100644 --- a/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go +++ b/pkg/cmd/server/bootstrappolicy/infra_sa_policy.go @@ -33,7 +33,6 @@ const ( InfraServiceAccountControllerServiceAccountName = "serviceaccount-controller" InfraServiceAccountPullSecretsControllerServiceAccountName = 
"serviceaccount-pull-secrets-controller" InfraServiceAccountTokensControllerServiceAccountName = "serviceaccount-tokens-controller" - InfraBuildPodControllerServiceAccountName = "build-pod-controller" InfraBuildConfigChangeControllerServiceAccountName = "build-config-change-controller" InfraPersistentVolumeBinderControllerServiceAccountName = "pv-binder-controller" diff --git a/pkg/cmd/server/origin/controller.go b/pkg/cmd/server/origin/controller.go index 843a87f8927e..0b657a711add 100644 --- a/pkg/cmd/server/origin/controller.go +++ b/pkg/cmd/server/origin/controller.go @@ -98,7 +98,6 @@ func (c *MasterConfig) NewOpenshiftControllerInitializers() (map[string]controll Codec: codec, } ret["build"] = buildControllerConfig.RunController - ret["build-pod"] = controller.RunBuildPodController ret["build-config-change"] = controller.RunBuildConfigChangeController // initialize apps.openshift.io controllers diff --git a/pkg/cmd/server/origin/controller/build.go b/pkg/cmd/server/origin/controller/build.go index eb9f7cd50930..4589e5ebc9df 100644 --- a/pkg/cmd/server/origin/controller/build.go +++ b/pkg/cmd/server/origin/controller/build.go @@ -7,9 +7,8 @@ import ( builddefaults "github.com/openshift/origin/pkg/build/admission/defaults" buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides" - buildclient "github.com/openshift/origin/pkg/build/client" - buildpodcontroller "github.com/openshift/origin/pkg/build/controller/buildpod" - buildcontrollerfactory "github.com/openshift/origin/pkg/build/controller/factory" + buildcontroller "github.com/openshift/origin/pkg/build/controller/build" + buildconfigcontroller "github.com/openshift/origin/pkg/build/controller/buildconfig" buildstrategy "github.com/openshift/origin/pkg/build/controller/strategy" configapi "github.com/openshift/origin/pkg/cmd/server/api" "github.com/openshift/origin/pkg/cmd/server/bootstrappolicy" @@ -23,7 +22,7 @@ type BuildControllerConfig struct { Codec runtime.Codec } -// RunBuildController starts the build sync loop for builds and buildConfig processing. +// RunController starts the build sync loop for builds and buildConfig processing. 
func (c *BuildControllerConfig) RunController(ctx ControllerContext) (bool, error) { pluginInitializer := kubeadmission.NewPluginInitializer( ctx.ClientBuilder.KubeInternalClientOrDie(bootstrappolicy.InfraBuildControllerServiceAccountName), @@ -50,15 +49,24 @@ func (c *BuildControllerConfig) RunController(ctx ControllerContext) (bool, erro if err != nil { return true, err } + kubeClient := ctx.ClientBuilder.KubeInternalClientOrDie(bootstrappolicy.InfraBuildControllerServiceAccountName) + externalKubeClient := ctx.ClientBuilder.ClientOrDie(bootstrappolicy.InfraBuildControllerServiceAccountName) - factory := buildcontrollerfactory.BuildControllerFactory{ - KubeClient: ctx.ClientBuilder.KubeInternalClientOrDie(bootstrappolicy.InfraBuildControllerServiceAccountName), - ExternalKubeClient: ctx.ClientBuilder.ClientOrDie(bootstrappolicy.InfraBuildControllerServiceAccountName), - OSClient: deprecatedOpenshiftClient, - BuildUpdater: buildclient.NewOSClientBuildClient(deprecatedOpenshiftClient), - BuildLister: buildclient.NewOSClientBuildClient(deprecatedOpenshiftClient), - BuildConfigGetter: buildclient.NewOSClientBuildConfigClient(deprecatedOpenshiftClient), - BuildDeleter: buildclient.NewBuildDeleter(deprecatedOpenshiftClient), + buildInformer := ctx.DeprecatedOpenshiftInformers.Builds() + buildConfigInformer := ctx.DeprecatedOpenshiftInformers.BuildConfigs() + imageStreamInformer := ctx.ImageInformers.Image().InternalVersion().ImageStreams() + podInformer := ctx.DeprecatedOpenshiftInformers.InternalKubernetesInformers().Core().InternalVersion().Pods() + secretInformer := ctx.DeprecatedOpenshiftInformers.InternalKubernetesInformers().Core().InternalVersion().Secrets() + + buildControllerParams := &buildcontroller.BuildControllerParams{ + BuildInformer: buildInformer, + BuildConfigInformer: buildConfigInformer, + ImageStreamInformer: imageStreamInformer, + PodInformer: podInformer, + SecretInformer: secretInformer, + KubeClientInternal: kubeClient, + KubeClientExternal: externalKubeClient, + OpenshiftClient: deprecatedOpenshiftClient, DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{ Image: c.DockerImage, // TODO: this will be set to --storage-version (the internal schema we use) @@ -78,36 +86,18 @@ func (c *BuildControllerConfig) RunController(ctx ControllerContext) (bool, erro BuildOverrides: buildOverrides, } - controller := factory.Create() - controller.Run() - deleteController := factory.CreateDeleteController() - deleteController.Run() - return true, nil -} - -func RunBuildPodController(ctx ControllerContext) (bool, error) { - go buildpodcontroller.NewBuildPodController( - ctx.DeprecatedOpenshiftInformers.Builds().Informer(), - ctx.DeprecatedOpenshiftInformers.InternalKubernetesInformers().Core().InternalVersion().Pods(), - ctx.ClientBuilder.KubeInternalClientOrDie(bootstrappolicy.InfraBuildPodControllerServiceAccountName), - ctx.ClientBuilder.ClientOrDie(bootstrappolicy.InfraBuildPodControllerServiceAccountName), - ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(bootstrappolicy.InfraBuildPodControllerServiceAccountName), - ).Run(5, ctx.Stop) + go buildcontroller.NewBuildController(buildControllerParams).Run(5, ctx.Stop) return true, nil } func RunBuildConfigChangeController(ctx ControllerContext) (bool, error) { clientName := bootstrappolicy.InfraBuildConfigChangeControllerServiceAccountName - bcInstantiator := buildclient.NewOSClientBuildConfigInstantiatorClient(ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(clientName)) - factory := 
buildcontrollerfactory.BuildConfigControllerFactory{ - Client: ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(clientName), - KubeClient: ctx.ClientBuilder.KubeInternalClientOrDie(clientName), - ExternalKubeClient: ctx.ClientBuilder.ClientOrDie(clientName), - BuildConfigInstantiator: bcInstantiator, - BuildLister: buildclient.NewOSClientBuildClient(ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(clientName)), - BuildConfigGetter: buildclient.NewOSClientBuildConfigClient(ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(clientName)), - BuildDeleter: buildclient.NewBuildDeleter(ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(clientName)), - } - go factory.Create().Run() + openshiftClient := ctx.ClientBuilder.DeprecatedOpenshiftClientOrDie(clientName) + kubeExternalClient := ctx.ClientBuilder.ClientOrDie(clientName) + buildConfigInformer := ctx.DeprecatedOpenshiftInformers.BuildConfigs() + buildInformer := ctx.DeprecatedOpenshiftInformers.Builds() + + controller := buildconfigcontroller.NewBuildConfigController(openshiftClient, kubeExternalClient, buildConfigInformer, buildInformer) + go controller.Run(5, ctx.Stop) return true, nil } diff --git a/pkg/cmd/server/origin/master_config.go b/pkg/cmd/server/origin/master_config.go index 16e11d858e1e..738ca75cd043 100644 --- a/pkg/cmd/server/origin/master_config.go +++ b/pkg/cmd/server/origin/master_config.go @@ -1036,15 +1036,6 @@ func (c *MasterConfig) GetOpenShiftClientEnvVars() ([]kapi.EnvVar, error) { // BuildControllerClients returns the build controller client objects func (c *MasterConfig) BuildControllerClients() (*osclient.Client, kclientsetinternal.Interface, kclientsetexternal.Interface) { - _, osClient, internalKubeClientset, externalKubeClientset, err := c.GetServiceAccountClients(bootstrappolicy.InfraBuildControllerServiceAccountName) - if err != nil { - glog.Fatal(err) - } - return osClient, internalKubeClientset, externalKubeClientset -} - -// BuildPodControllerClients returns the build pod controller client objects -func (c *MasterConfig) BuildPodControllerClients() (*osclient.Client, kclientsetinternal.Interface, kclientsetexternal.Interface) { return c.PrivilegedLoopbackOpenShiftClient, c.PrivilegedLoopbackKubernetesClientsetInternal, c.PrivilegedLoopbackKubernetesClientsetExternal } diff --git a/pkg/cmd/server/start/start_master.go b/pkg/cmd/server/start/start_master.go index d37d0230c0dd..1907ca37d253 100644 --- a/pkg/cmd/server/start/start_master.go +++ b/pkg/cmd/server/start/start_master.go @@ -821,7 +821,6 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro ) if configapi.IsBuildEnabled(&oc.Options) { allowedOpenshiftControllers.Insert("build") - allowedOpenshiftControllers.Insert("build-pod") allowedOpenshiftControllers.Insert("build-config-change") } if oc.Options.TemplateServiceBrokerConfig != nil { diff --git a/test/common/build/controllers.go b/test/common/build/controllers.go index cdb530ebcabd..77b630b14b18 100644 --- a/test/common/build/controllers.go +++ b/test/common/build/controllers.go @@ -9,6 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/wait" watchapi "k8s.io/apimachinery/pkg/watch" kapi "k8s.io/kubernetes/pkg/api" kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -27,13 +28,9 @@ var ( // for any other changes to happen when testing whether only a single build got processed BuildControllerTestWait = 10 * time.Second - // BuildPodControllerTestWait is the time that 
diff --git a/pkg/cmd/server/origin/master_config.go b/pkg/cmd/server/origin/master_config.go
index 16e11d858e1e..738ca75cd043 100644
--- a/pkg/cmd/server/origin/master_config.go
+++ b/pkg/cmd/server/origin/master_config.go
@@ -1036,15 +1036,6 @@ func (c *MasterConfig) GetOpenShiftClientEnvVars() ([]kapi.EnvVar, error) {
 // BuildControllerClients returns the build controller client objects
 func (c *MasterConfig) BuildControllerClients() (*osclient.Client, kclientsetinternal.Interface, kclientsetexternal.Interface) {
-	_, osClient, internalKubeClientset, externalKubeClientset, err := c.GetServiceAccountClients(bootstrappolicy.InfraBuildControllerServiceAccountName)
-	if err != nil {
-		glog.Fatal(err)
-	}
-	return osClient, internalKubeClientset, externalKubeClientset
-}
-
-// BuildPodControllerClients returns the build pod controller client objects
-func (c *MasterConfig) BuildPodControllerClients() (*osclient.Client, kclientsetinternal.Interface, kclientsetexternal.Interface) {
 	return c.PrivilegedLoopbackOpenShiftClient, c.PrivilegedLoopbackKubernetesClientsetInternal, c.PrivilegedLoopbackKubernetesClientsetExternal
 }
diff --git a/pkg/cmd/server/start/start_master.go b/pkg/cmd/server/start/start_master.go
index d37d0230c0dd..1907ca37d253 100644
--- a/pkg/cmd/server/start/start_master.go
+++ b/pkg/cmd/server/start/start_master.go
@@ -821,7 +821,6 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro
 	)
 	if configapi.IsBuildEnabled(&oc.Options) {
 		allowedOpenshiftControllers.Insert("build")
-		allowedOpenshiftControllers.Insert("build-pod")
 		allowedOpenshiftControllers.Insert("build-config-change")
 	}
 	if oc.Options.TemplateServiceBrokerConfig != nil {
diff --git a/test/common/build/controllers.go b/test/common/build/controllers.go
index cdb530ebcabd..77b630b14b18 100644
--- a/test/common/build/controllers.go
+++ b/test/common/build/controllers.go
@@ -9,6 +9,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/util/wait"
 	watchapi "k8s.io/apimachinery/pkg/watch"
 	kapi "k8s.io/kubernetes/pkg/api"
 	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -27,13 +28,9 @@ var (
 	// for any other changes to happen when testing whether only a single build got processed
 	BuildControllerTestWait = 10 * time.Second
 
-	// BuildPodControllerTestWait is the time that RunBuildPodControllerTest waits
-	// after a state transition to make sure other state transitions don't occur unexpectedly
-	BuildPodControllerTestWait = 10 * time.Second
-
-	// BuildPodControllerTestTransitionTimeout is the time RunBuildPodControllerTest waits
+	// BuildControllerTestTransitionTimeout is the time RunBuildControllerPodSyncTest waits
 	// for a build trasition to occur after the pod's status has been updated
-	BuildPodControllerTestTransitionTimeout = 60 * time.Second
+	BuildControllerTestTransitionTimeout = 60 * time.Second
 
 	// BuildControllersWatchTimeout is used by all tests to wait for watch events. In case where only
 	// a single watch event is expected, the test will fail after the timeout.
@@ -168,7 +165,7 @@ type buildControllerPodTest struct {
 	States []buildControllerPodState
 }
 
-func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient kclientset.Interface) {
+func RunBuildControllerPodSyncTest(t testingT, osClient *client.Client, kClient kclientset.Interface) {
 	ns := testutil.Namespace()
 	tests := []buildControllerPodTest{
@@ -305,7 +302,7 @@ func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient kcli
 				errChan <- fmt.Errorf("unexpected event received: %s, object: %#v", e.Type, e.Object)
 				return
 			}
-			if done {
+			if done && b.Status.Phase != state.BuildPhase {
 				errChan <- fmt.Errorf("build %s/%s transitioned to new state (%s) after reaching desired state", b.Namespace, b.Name, b.Status.Phase)
 				return
 			}
@@ -320,7 +317,7 @@ func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient kcli
 			case err := <-errChan:
 				t.Errorf("%s: Error %v", test.Name, err)
 				return false
-			case <-time.After(BuildPodControllerTestTransitionTimeout):
+			case <-time.After(BuildControllerTestTransitionTimeout):
 				t.Errorf("%s: Timed out waiting for build %s/%s to reach state %s. Current state: %s", test.Name, b.Namespace, b.Name, state.BuildPhase, b.Status.Phase)
 				return false
 			case <-stateReached:
@@ -333,7 +330,7 @@ func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient kcli
 				t.Errorf("%s: Error %v", test.Name, err)
 				return false
-			case <-time.After(BuildPodControllerTestWait):
+			case <-time.After(BuildControllerTestWait):
 				// After waiting for a set time, if no other state is reached, continue to wait for next state transition
 				return true
 			}
@@ -640,10 +637,6 @@ func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client,
 		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
 	}
 
-	// give the poddeletecontroller's build cache time to be updated with the build object
-	// so it doesn't get a miss when looking up the build while processing the pod delete event.
-	time.Sleep(10 * time.Second)
-
 	clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), metav1.NewDeleteOptions(0))
 	event = waitForWatch(t, "build updated to error", buildWatch)
 	if e, a := watchapi.Modified, event.Type; e != a {
@@ -653,21 +646,33 @@ func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client,
 	if newBuild.Status.Phase != buildapi.BuildPhaseError {
 		t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status.Phase)
 	}
-	events, err := clusterAdminKubeClientset.Core().Events(testutil.Namespace()).Search(kapi.Scheme, newBuild)
-	if err != nil {
-		t.Fatalf("error getting build events: %v", err)
-	}
+
 	foundFailed := false
-	for _, event := range events.Items {
-		if event.Reason == buildapi.BuildFailedEventReason {
-			foundFailed = true
-			expect := fmt.Sprintf(buildapi.BuildFailedEventMessage, newBuild.Namespace, newBuild.Name)
-			if event.Message != expect {
-				t.Fatalf("expected failed event message to be %s, got %s", expect, event.Message)
+
+	err = wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
+		events, err := clusterAdminKubeClientset.Core().Events(testutil.Namespace()).Search(kapi.Scheme, newBuild)
+		if err != nil {
+			t.Fatalf("error getting build events: %v", err)
+			return false, fmt.Errorf("error getting build events: %v", err)
+		}
+		for _, event := range events.Items {
+			if event.Reason == buildapi.BuildFailedEventReason {
+				foundFailed = true
+				expect := fmt.Sprintf(buildapi.BuildFailedEventMessage, newBuild.Namespace, newBuild.Name)
+				if event.Message != expect {
+					return false, fmt.Errorf("expected failed event message to be %s, got %s", expect, event.Message)
+				}
+				return true, nil
 			}
-			break
 		}
+		return false, nil
+	})
+
+	if err != nil {
+		t.Fatalf("unexpected: %v", err)
+		return
 	}
+
 	if !foundFailed {
 		t.Fatalf("expected to find a failed event on the build %s/%s", newBuild.Namespace, newBuild.Name)
 	}
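The hunk above drops a fixed 10-second sleep in favor of wait.Poll, which retries the event lookup until the build-failed event appears or the poll times out. A minimal sketch of that polling pattern follows; the waitForEvent helper and its eventExists callback are hypothetical stand-ins for the real event search, assuming k8s.io/apimachinery is available.

// Illustrative sketch of the wait.Poll pattern.
package sketch

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForEvent polls eventExists once per second for up to 30 seconds.
// Returning (false, nil) from the condition keeps polling; a non-nil error
// aborts the poll immediately; (true, nil) ends the poll successfully.
func waitForEvent(eventExists func() (bool, error)) error {
	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
		found, err := eventExists()
		if err != nil {
			return false, fmt.Errorf("error looking up event: %v", err)
		}
		return found, nil
	})
}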
diff --git a/test/extended/builds/controller_compat.go b/test/extended/builds/controller_compat.go
index c8c225b5acb0..7f375c87f68e 100644
--- a/test/extended/builds/controller_compat.go
+++ b/test/extended/builds/controller_compat.go
@@ -24,9 +24,9 @@ var _ = g.Describe("[bldcompat][Slow][Compatibility] build controller", func() {
 			build.RunBuildControllerTest(g.GinkgoT(), oc.AdminClient(), oc.InternalAdminKubeClient())
 		})
 	})
-	g.Describe("RunBuildPodControllerTest", func() {
+	g.Describe("RunBuildControllerPodSyncTest", func() {
 		g.It("should succeed", func() {
-			build.RunBuildPodControllerTest(g.GinkgoT(), oc.AdminClient(), oc.InternalAdminKubeClient())
+			build.RunBuildControllerPodSyncTest(g.GinkgoT(), oc.AdminClient(), oc.InternalAdminKubeClient())
 		})
 	})
 	g.Describe("RunImageChangeTriggerTest [SkipPrevControllers]", func() {
diff --git a/test/integration/buildcontroller_test.go b/test/integration/buildcontroller_test.go
index a9e0c4132b31..54595be16640 100644
--- a/test/integration/buildcontroller_test.go
+++ b/test/integration/buildcontroller_test.go
@@ -23,7 +23,6 @@ import (
 type controllerCount struct {
 	BuildControllers,
-	BuildPodControllers,
 	ImageChangeControllers,
 	ConfigChangeControllers int
 }
@@ -37,12 +36,12 @@ func TestConcurrentBuildControllers(t *testing.T) {
 	build.RunBuildControllerTest(t, osClient, kClient)
 }
 
-// TestConcurrentBuildPodControllers tests the lifecycle of a build pod when running multiple controllers.
-func TestConcurrentBuildPodControllers(t *testing.T) {
+// TestConcurrentBuildControllersPodSync tests the lifecycle of a build pod when running multiple controllers.
+func TestConcurrentBuildControllersPodSync(t *testing.T) {
 	defer testutil.DumpEtcdOnFailure(t)
-	// Start a master with multiple BuildPodControllers
-	osClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)
-	build.RunBuildPodControllerTest(t, osClient, kClient)
+	// Start a master with multiple BuildControllers
+	osClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)
+	build.RunBuildControllerPodSyncTest(t, osClient, kClient)
 }
 
 func TestConcurrentBuildImageChangeTriggerControllers(t *testing.T) {
@@ -138,6 +137,7 @@ func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Cli
 	}
 	openshiftControllerContext := origincontrollers.ControllerContext{
+		ImageInformers:        openshiftConfig.ImageInformers,
 		KubeControllerContext: controllerContext,
 		ClientBuilder: origincontrollers.OpenshiftControllerClientBuilder{
 			ControllerClientBuilder: controller.SAControllerClientBuilder{
@@ -158,12 +158,6 @@ func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Cli
 			t.Fatal(err)
 		}
 	}
-	for i := 0; i < counts.BuildPodControllers; i++ {
-		_, err := openshiftControllerInitializers["build-pod"](openshiftControllerContext)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
 	for i := 0; i < counts.ImageChangeControllers; i++ {
 		openshiftConfig.RunImageTriggerController()
 	}
diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml
index d8bf76f8e820..67d64bf8868d 100644
--- a/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml
+++ b/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml
@@ -652,20 +652,6 @@ items:
     namespace: openshift-infra
   userNames:
   - system:serviceaccount:openshift-infra:build-controller
-- apiVersion: v1
-  groupNames: null
-  kind: ClusterRoleBinding
-  metadata:
-    creationTimestamp: null
-    name: system:openshift:controller:build-pod-controller
-  roleRef:
-    name: system:openshift:controller:build-pod-controller
-  subjects:
-  - kind: ServiceAccount
-    name: build-pod-controller
-    namespace: openshift-infra
-  userNames:
-  - system:serviceaccount:openshift-infra:build-pod-controller
 - apiVersion: v1
   groupNames: null
   kind: ClusterRoleBinding
diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
index c012f92cf86e..de2f712b7d58 100644
--- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
+++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
@@ -3559,6 +3559,7 @@ items:
     - delete
     - get
     - list
+    - patch
     - update
     - watch
   - apiGroups:
@@ -3589,46 +3590,15 @@ items:
     - imagestreams
     verbs:
     - get
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - get
     - list
   - apiGroups:
     - ""
     attributeRestrictions: null
     resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
-- apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
-    creationTimestamp: null
-    name: system:openshift:controller:build-pod-controller
-  rules:
-  - apiGroups:
-    - ""
-    - build.openshift.io
-    attributeRestrictions: null
-    resources:
-    - builds
+    - secrets
     verbs:
-    - create
-    - delete
     - get
     - list
-    - update
-    - watch
   - apiGroups:
     - ""
     attributeRestrictions: null
@@ -3643,29 +3613,9 @@ items:
     - ""
     attributeRestrictions: null
     resources:
-    - secrets
-    verbs:
-    - get
-  - apiGroups:
-    - ""
-    - build.openshift.io
-    attributeRestrictions: null
-    resources:
-    - buildconfigs
+    - namespaces
     verbs:
     - get
-  - apiGroups:
-    - ""
-    - build.openshift.io
-    attributeRestrictions: null
-    resources:
-    - builds/custom
-    - builds/docker
-    - builds/jenkinspipeline
-    - builds/optimizeddocker
-    - builds/source
-    verbs:
-    - create
   - apiGroups:
     - ""
     attributeRestrictions: null