Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions Gopkg.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,3 +41,7 @@ required = [
[[constraint]]
name = "github.com/kevinburke/go-bindata"
version = "v3.11.0"

[[constraint]]
name = "github.com/openshift/cluster-version-operator"
revision = "fe673cb712fa5e27001488fc088ac91bb553353d"
3 changes: 3 additions & 0 deletions cmd/cluster-ingress-operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,9 @@ func main() {
resyncPeriod := 10 * time.Minute
logrus.Infof("Watching %s, %s, %s, %d", resource, kind, namespace, resyncPeriod)
sdk.Watch(resource, kind, namespace, resyncPeriod)
// TODO Use a named constant for the router's namespace or get the
// namespace from config.
sdk.Watch("apps/v1", "DaemonSet", "openshift-ingress", resyncPeriod)
sdk.Handle(handler)
sdk.Run(context.TODO())
}
9 changes: 9 additions & 0 deletions manifests/00-cluster-role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,15 @@ rules:
- list
- watch

- apiGroups:
- operatorstatus.openshift.io
resources:
- clusteroperators
verbs:
- create
- get
- update

# Mirrored from assets/router/cluster-role.yaml
- apiGroups:
- ""
Expand Down
8 changes: 4 additions & 4 deletions pkg/manifests/bindata.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions pkg/stub/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ type Handler struct {
}

func (h *Handler) Handle(ctx context.Context, event sdk.Event) error {
defer h.syncOperatorStatus()

// TODO: This should be adding an item to a rate limited work queue, but for
// now correctness is more important than performance.
switch o := event.Object.(type) {
Expand Down
196 changes: 196 additions & 0 deletions pkg/stub/status.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,196 @@
package stub

import (
"fmt"
"strings"

"github.com/sirupsen/logrus"

ingressv1alpha1 "github.com/openshift/cluster-ingress-operator/pkg/apis/ingress/v1alpha1"
"github.com/openshift/cluster-ingress-operator/pkg/util/clusteroperator"
operatorversion "github.com/openshift/cluster-ingress-operator/version"
osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1"

"github.com/operator-framework/operator-sdk/pkg/sdk"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// syncOperatorStatus computes the operator's current status and therefrom
// creates or updates the ClusterOperator resource for the operator.
//
// Status reporting is best-effort: every failure is logged rather than
// returned so that the reconcile loop which defers this call is never
// aborted by a status bookkeeping problem.
func (h *Handler) syncOperatorStatus() {
	state := &osv1.ClusterOperator{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ClusterOperator",
			APIVersion: "operatorstatus.openshift.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: h.Namespace,
			// TODO Use a named constant or get name from config.
			Name: "openshift-ingress",
		},
	}

	getErr := sdk.Get(state)
	mustCreate := errors.IsNotFound(getErr)
	if getErr != nil && !mustCreate {
		logrus.Errorf("syncOperatorStatus: error getting ClusterOperator %s/%s: %v",
			state.Namespace, state.Name, getErr)

		return
	}

	ns, ingresses, daemonsets, stateErr := h.getOperatorState()
	if stateErr != nil {
		logrus.Errorf("syncOperatorStatus: getOperatorState: %v", stateErr)

		return
	}

	// Recompute conditions from the observed state; keep the previous
	// conditions so an unchanged status can skip the update below.
	previous := state.Status.Conditions
	state.Status.Conditions = computeStatusConditions(previous, ns,
		ingresses, daemonsets)

	if mustCreate {
		// First sync: record the operator version and create the
		// resource.
		state.Status.Version = operatorversion.Version

		if err := sdk.Create(state); err != nil {
			logrus.Errorf("syncOperatorStatus: failed to create ClusterOperator %s/%s: %v",
				state.Namespace, state.Name, err)
		} else {
			logrus.Infof("syncOperatorStatus: created ClusterOperator %s/%s (UID %v)",
				state.Namespace, state.Name, state.UID)
		}

		return
	}

	// Nothing changed; avoid a no-op API write.
	if clusteroperator.ConditionsEqual(previous, state.Status.Conditions) {
		return
	}

	if err := sdk.Update(state); err != nil {
		logrus.Errorf("syncOperatorStatus: failed to update status of ClusterOperator %s/%s: %v",
			state.Namespace, state.Name, err)
	}
}

// getOperatorState gets and returns the resources necessary to compute the
// operator's current state: the router namespace (nil if it does not exist,
// which is a valid state rather than an error), the ClusterIngresses in the
// operator's namespace, and the router DaemonSets in the router namespace.
func (h *Handler) getOperatorState() (*corev1.Namespace, []ingressv1alpha1.ClusterIngress, []appsv1.DaemonSet, error) {
	ns, err := h.ManifestFactory.RouterNamespace()
	if err != nil {
		return nil, nil, nil, fmt.Errorf(
			"error building router namespace: %v", err)
	}

	if getErr := sdk.Get(ns); getErr != nil {
		// A missing namespace is reported via status conditions, not
		// as an error.
		if errors.IsNotFound(getErr) {
			return nil, nil, nil, nil
		}

		return nil, nil, nil, fmt.Errorf(
			"error getting Namespace %s: %v", ns.Name, getErr)
	}

	ingresses := &ingressv1alpha1.ClusterIngressList{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ClusterIngress",
			APIVersion: "ingress.openshift.io/v1alpha1",
		},
	}
	if err := sdk.List(h.Namespace, ingresses,
		sdk.WithListOptions(&metav1.ListOptions{})); err != nil {
		return nil, nil, nil, fmt.Errorf(
			"failed to list ClusterIngresses: %v", err)
	}

	daemonsets := &appsv1.DaemonSetList{
		TypeMeta: metav1.TypeMeta{
			Kind:       "DaemonSet",
			APIVersion: "apps/v1",
		},
	}
	if err := sdk.List(ns.Name, daemonsets,
		sdk.WithListOptions(&metav1.ListOptions{})); err != nil {
		return nil, nil, nil, fmt.Errorf(
			"failed to list DaemonSets: %v", err)
	}

	return ns, ingresses.Items, daemonsets.Items, nil
}

// computeStatusConditions computes the operator's current state.
func computeStatusConditions(conditions []osv1.ClusterOperatorStatusCondition, ns *corev1.Namespace, ingresses []ingressv1alpha1.ClusterIngress, daemonsets []appsv1.DaemonSet) []osv1.ClusterOperatorStatusCondition {
failingCondition := &osv1.ClusterOperatorStatusCondition{
Type: osv1.OperatorFailing,
Status: osv1.ConditionUnknown,
}
if ns == nil {
failingCondition.Status = osv1.ConditionTrue
failingCondition.Reason = "NoNamespace"
failingCondition.Message = "router namespace does not exist"
} else {
failingCondition.Status = osv1.ConditionFalse
}
conditions = clusteroperator.SetStatusCondition(conditions,
failingCondition)

progressingCondition := &osv1.ClusterOperatorStatusCondition{
Type: osv1.OperatorProgressing,
Status: osv1.ConditionUnknown,
}
numIngresses := len(ingresses)
numDaemonsets := len(daemonsets)
if numIngresses == numDaemonsets {

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

When an Ingress CR is deleted, corresponding daemonset may not be deleted yet (pending op) and the length of numIngresses and numDaemonsets will defer, don't we want to say OperatorProgressing = True in this case?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we do want to say "Progressing" is true in that case. The docstring for osv1.OperatorProgressing says, "OperatorProgressing indicates that the operator is actively making changes to the binary maintained by the operator". When a ClusterIngress is deleted, then the operator reconciles, and then it is actively making changes to the binary (deleting the DaemonSet for the router) until the DaemonSet is gone.

progressingCondition.Status = osv1.ConditionFalse
} else {
progressingCondition.Status = osv1.ConditionTrue
progressingCondition.Reason = "Reconciling"
progressingCondition.Message = fmt.Sprintf(
"have %d ingresses, want %d",
numDaemonsets, numIngresses)
}
conditions = clusteroperator.SetStatusCondition(conditions,
progressingCondition)

availableCondition := &osv1.ClusterOperatorStatusCondition{
Type: osv1.OperatorAvailable,
Status: osv1.ConditionUnknown,
}
dsAvailable := map[string]bool{}
for _, ds := range daemonsets {
dsAvailable[ds.Name] = ds.Status.NumberAvailable > 0
}
unavailable := []string{}
for _, ingress := range ingresses {
// TODO Use the manifest to derive the name, or use labels or
// owner references.
name := "router-" + ingress.Name
if available, exists := dsAvailable[name]; !exists {
msg := fmt.Sprintf("no router for ingress %q",
ingress.Name)
unavailable = append(unavailable, msg)
} else if !available {
msg := fmt.Sprintf("ingress %q not available",
ingress.Name)
unavailable = append(unavailable, msg)
}
}
if len(unavailable) == 0 {
availableCondition.Status = osv1.ConditionTrue
} else {
availableCondition.Status = osv1.ConditionFalse
availableCondition.Reason = "IngressUnavailable"
availableCondition.Message = strings.Join(unavailable,
"\n")
}
conditions = clusteroperator.SetStatusCondition(conditions,
availableCondition)

return conditions
}
Loading