-
Notifications
You must be signed in to change notification settings - Fork 199
/
kafkatopic_validator.go
262 lines (226 loc) · 11.4 KB
/
kafkatopic_validator.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
// Copyright © 2022 Cisco Systems, Inc. and/or its affiliates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhooks
import (
"context"
"fmt"
"strings"
"emperror.dev/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
banzaicloudv1alpha1 "github.com/banzaicloud/koperator/api/v1alpha1"
banzaicloudv1beta1 "github.com/banzaicloud/koperator/api/v1beta1"
"github.com/banzaicloud/koperator/pkg/k8sutil"
"github.com/banzaicloud/koperator/pkg/kafkaclient"
"github.com/banzaicloud/koperator/pkg/util"
)
const (
	// TopicManagedByAnnotationKey is the annotation key inspected on KafkaTopic
	// CRs to decide whether an already-existing Kafka topic is managed by Koperator.
	TopicManagedByAnnotationKey = "managedBy"
	// TopicManagedByKoperatorAnnotationValue is the expected annotation value;
	// the comparison is case-insensitive (the validator lowercases the value).
	TopicManagedByKoperatorAnnotationValue = "koperator"
)
// KafkaTopicValidator implements admission validation for KafkaTopic custom resources.
type KafkaTopicValidator struct {
	// Client reads KafkaCluster and KafkaTopic objects from the API server.
	Client client.Client
	// NewKafkaFromCluster builds a Kafka client (and a close func) for the
	// referenced KafkaCluster so existing topics can be inspected.
	NewKafkaFromCluster func(client.Client, *banzaicloudv1beta1.KafkaCluster) (kafkaclient.KafkaClient, func(), error)
	// Log is the logger used during validation.
	Log logr.Logger
}
// ValidateCreate validates a KafkaTopic on admission create requests.
func (s KafkaTopicValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error {
	return s.validate(ctx, obj)
}
// ValidateUpdate validates a KafkaTopic on admission update requests.
// Only the new object is validated; the old object is ignored.
func (s KafkaTopicValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
	return s.validate(ctx, newObj)
}
// ValidateDelete accepts all delete requests without validation.
func (s KafkaTopicValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
	return nil
}
// validate is the shared validation entry point for create and update
// admission requests. It returns an Invalid API error listing all offending
// fields, or an InternalError when the validation itself could not be run.
func (s *KafkaTopicValidator) validate(ctx context.Context, obj runtime.Object) error {
	kafkaTopic, ok := obj.(*banzaicloudv1alpha1.KafkaTopic)
	if !ok {
		// Guard against a panic on an unexpected object type; webhook
		// registration should only route KafkaTopic objects here.
		return apierrors.NewBadRequest(fmt.Sprintf("expected a KafkaTopic but got a %T", obj))
	}
	log := s.Log.WithValues("name", kafkaTopic.GetName(), "namespace", kafkaTopic.GetNamespace())
	fieldErrs, err := s.validateKafkaTopic(ctx, log, kafkaTopic)
	if err != nil {
		log.Error(err, errorDuringValidationMsg)
		return apierrors.NewInternalError(errors.WithMessage(err, errorDuringValidationMsg))
	}
	if len(fieldErrs) == 0 {
		return nil
	}
	log.Info("rejected", "invalid field(s)", fieldErrs.ToAggregate().Error())
	return apierrors.NewInvalid(
		kafkaTopic.GetObjectKind().GroupVersionKind().GroupKind(),
		kafkaTopic.Name, fieldErrs)
}
// validateKafkaTopic validates a KafkaTopic CR against its own spec, the
// referenced KafkaCluster CR, any duplicate KafkaTopic CRs, and the live
// Kafka cluster. It returns the list of invalid fields (possibly empty)
// and a non-nil error only when validation itself could not be carried out
// (e.g. the API server or the Kafka brokers were unreachable).
func (s *KafkaTopicValidator) validateKafkaTopic(ctx context.Context, log logr.Logger, topic *banzaicloudv1alpha1.KafkaTopic) (field.ErrorList, error) {
	var allErrs field.ErrorList
	var logMsg string
	// First check if the KafkaTopic spec itself is valid.
	if topic.Spec.Partitions < banzaicloudv1alpha1.MinPartitions || topic.Spec.Partitions == 0 {
		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("partitions"), topic.Spec.Partitions, outOfRangePartitionsErrMsg))
	}
	if topic.Spec.ReplicationFactor < banzaicloudv1alpha1.MinReplicationFactor || topic.Spec.ReplicationFactor == 0 {
		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("replicationFactor"), topic.Spec.ReplicationFactor, outOfRangeReplicationFactorErrMsg))
	}
	// Resolve the referenced KafkaCluster; an empty clusterRef namespace
	// defaults to the topic's own namespace.
	clusterName := topic.Spec.ClusterRef.Name
	clusterNamespace := topic.Spec.ClusterRef.Namespace
	if clusterNamespace == "" {
		clusterNamespace = topic.GetNamespace()
	}
	var cluster *banzaicloudv1beta1.KafkaCluster
	var err error
	// Check if the cluster being referenced actually exists
	if cluster, err = k8sutil.LookupKafkaCluster(ctx, s.Client, clusterName, clusterNamespace); err != nil {
		if !apierrors.IsNotFound(err) {
			return nil, errors.Wrap(err, cantConnectAPIServerMsg)
		}
		if k8sutil.IsMarkedForDeletion(topic.ObjectMeta) {
			log.Info("Deleted as a result of a cluster deletion")
			return nil, nil
		}
		// Report the effective (defaulted) namespace rather than the raw
		// clusterRef namespace, which may be the empty string.
		logMsg = fmt.Sprintf("kafkaCluster '%s' in the namespace '%s' does not exist", clusterName, clusterNamespace)
		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("clusterRef").Child("name"), clusterName, logMsg))
		// return is needed here because the nil cluster would be dereferenced
		// by the checks below
		return allErrs, nil
	}
	if k8sutil.IsMarkedForDeletion(cluster.ObjectMeta) {
		// Let this through, it's a delete topic request from a parent cluster being deleted
		log.Info("Cluster is going down for deletion, assuming a delete topic request")
		return nil, nil
	}
	if util.ObjectManagedByClusterRegistry(cluster) {
		// referencing remote Kafka clusters is not allowed
		logMsg = fmt.Sprintf("kafkaCluster '%s' in the namespace '%s' is a remote kafka cluster", clusterName, clusterNamespace)
		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("clusterRef").Child("name"), clusterName, logMsg))
	}
	// Reject duplicate KafkaTopic CRs referring to the same cluster/topic pair.
	fieldErr, err := s.checkExistingKafkaTopicCRs(ctx, clusterNamespace, topic)
	if err != nil {
		return nil, err
	}
	if fieldErr != nil {
		allErrs = append(allErrs, fieldErr)
	}
	// Finally validate against the live state of the Kafka cluster.
	fieldErrList, err := s.checkKafka(ctx, topic, cluster)
	if err != nil {
		return nil, err
	}
	allErrs = append(allErrs, fieldErrList...)
	return allErrs, nil
}
// checkKafka creates a Kafka admin client and connects to the Kafka brokers to check
// whether the referred topic exists, and what are its properties.
//
// When the topic already exists on the brokers:
//   - if no KafkaTopic CR exists yet (a CR is being created for a pre-existing
//     topic), the CR must carry the managedBy=koperator annotation and its
//     partitions, replication factor and config must match the live topic;
//   - otherwise (an update) the partition count must not decrease and the
//     replication factor must not change.
//
// When the topic does not exist, the requested replication factor must not
// exceed the number of available brokers.
func (s *KafkaTopicValidator) checkKafka(ctx context.Context, topic *banzaicloudv1alpha1.KafkaTopic,
	cluster *banzaicloudv1beta1.KafkaCluster) (field.ErrorList, error) {
	// retrieve an admin client for the cluster
	broker, closeClient, err := s.NewKafkaFromCluster(s.Client, cluster)
	if err != nil {
		// Log as info to not cause stack traces when making CC topic
		return nil, errors.WrapIff(err, fmt.Sprintf("%s: %s", cantConnectErrorMsg, topic.Spec.ClusterRef.Name))
	}
	defer closeClient()
	existing, err := broker.GetTopic(topic.Spec.Name)
	if err != nil {
		return nil, errors.WrapIff(err, fmt.Sprintf("failed to list topics for kafka cluster: %s", topic.Spec.ClusterRef.Name))
	}
	var allErrs field.ErrorList
	// The topic exists
	if existing != nil {
		// Check if this is the correct CR for this topic
		topicCR := &banzaicloudv1alpha1.KafkaTopic{}
		if err := s.Client.Get(ctx, types.NamespacedName{Name: topic.Name, Namespace: topic.Namespace}, topicCR); err != nil {
			// Checking that the validation request is update
			if apierrors.IsNotFound(err) {
				// No CR yet: this is a create request adopting a pre-existing
				// topic, which is only allowed when the managedBy annotation
				// opts in to Koperator management.
				if manager, ok := topic.GetAnnotations()[TopicManagedByAnnotationKey]; !ok || strings.ToLower(manager) != TopicManagedByKoperatorAnnotationValue {
					allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("name"), topic.Spec.Name,
						fmt.Sprintf(`topic "%s" already exists on kafka cluster and it is not managed by Koperator,
if you want it to be managed by Koperator so you can modify its configurations through a KafkaTopic CR,
add this "%s: %s" annotation to this KafkaTopic CR`, topic.Spec.Name, TopicManagedByAnnotationKey, TopicManagedByKoperatorAnnotationValue)))
				}
				// Comparing KafkaTopic configuration with the existing
				if existing.NumPartitions != topic.Spec.Partitions {
					allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("partitions"), topic.Spec.Partitions,
						fmt.Sprintf(`When creating KafkaTopic CR for existing topic, initially its partition number must be the same as what the existing kafka topic has (given: %v present: %v)`, topic.Spec.Partitions, existing.NumPartitions)))
				}
				if existing.ReplicationFactor != int16(topic.Spec.ReplicationFactor) {
					// NOTE: field path casing fixed to "replicationFactor" for
					// consistency with the other replication factor errors.
					allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("replicationFactor"), topic.Spec.ReplicationFactor,
						fmt.Sprintf(`When creating KafkaTopic CR for existing topic, initially its replication factor must be the same as what the existing kafka topic has (given: %v present: %v)`, topic.Spec.ReplicationFactor, existing.ReplicationFactor)))
				}
				if diff := cmp.Diff(existing.ConfigEntries, util.MapStringStringPointer(topic.Spec.Config), cmpopts.EquateEmpty()); diff != "" {
					allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("config"), topic.Spec.Partitions,
						fmt.Sprintf(`When creating KafkaTopic CR for existing topic, initially its configuration must be the same as the existing kafka topic configuration.
Difference: %s`, diff)))
				}
				if len(allErrs) > 0 {
					return allErrs, nil
				}
			} else {
				return nil, errors.WrapIff(err, cantConnectAPIServerMsg)
			}
		}
		// make sure the user isn't trying to decrease partition count
		if existing.NumPartitions > topic.Spec.Partitions {
			allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("partitions"), topic.Spec.Partitions,
				fmt.Sprintf("kafka does not support decreasing partition count on an existing topic (from %v to %v)", existing.NumPartitions, topic.Spec.Partitions)))
		}
		// check if the user is trying to change the replication factor
		if existing.ReplicationFactor != int16(topic.Spec.ReplicationFactor) {
			allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("replicationFactor"), topic.Spec.ReplicationFactor,
				fmt.Sprintf("kafka does not support changing the replication factor on an existing topic (from %v to %v)", existing.ReplicationFactor, topic.Spec.ReplicationFactor)))
		}
		// the topic does not exist check if requesting a replication factor larger than the broker size
	} else if int(topic.Spec.ReplicationFactor) > broker.NumBrokers() {
		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("replicationFactor"), topic.Spec.ReplicationFactor,
			fmt.Sprintf("%s (available brokers: %v)", invalidReplicationFactorErrMsg, broker.NumBrokers())))
	}
	return allErrs, nil
}
// checkExistingKafkaTopicCRs checks whether there's any other duplicate KafkaTopic CR
// that refers to the same KafkaCluster's same topic. It returns a field error
// describing the duplicate (or nil), and a non-nil error only when the API
// server could not be queried.
func (s *KafkaTopicValidator) checkExistingKafkaTopicCRs(ctx context.Context,
	clusterNamespace string, topic *banzaicloudv1alpha1.KafkaTopic) (*field.Error, error) {
	// List every KafkaTopic CR whose spec.name matches the topic under admission.
	kafkaTopicList := banzaicloudv1alpha1.KafkaTopicList{}
	err := s.Client.List(ctx, &kafkaTopicList, client.MatchingFields{"spec.name": topic.Spec.Name})
	if err != nil {
		return nil, errors.Wrap(err, cantConnectAPIServerMsg)
	}
	var foundKafkaTopic *banzaicloudv1alpha1.KafkaTopic
	for i, kafkaTopic := range kafkaTopicList.Items {
		// filter the cr under admission
		if kafkaTopic.GetName() == topic.GetName() && kafkaTopic.GetNamespace() == topic.GetNamespace() {
			continue
		}
		// filter remote KafkaTopic CRs
		if util.ObjectManagedByClusterRegistry(&kafkaTopic) {
			continue
		}
		referredNamespace := kafkaTopic.Spec.ClusterRef.Namespace
		referredName := kafkaTopic.Spec.ClusterRef.Name
		if referredName == topic.Spec.ClusterRef.Name {
			// An empty clusterRef namespace refers to the CR's own namespace,
			// so both the explicit and the defaulted forms must be considered.
			if (kafkaTopic.GetNamespace() == clusterNamespace && referredNamespace == "") || referredNamespace == clusterNamespace {
				foundKafkaTopic = &kafkaTopicList.Items[i]
				break
			}
		}
	}
	if foundKafkaTopic != nil {
		// Typo fix: "namesapce" -> "namespace" in the user-facing message.
		logMsg := fmt.Sprintf("kafkaTopic CR '%s' in namespace '%s' is already referencing to Kafka topic '%s'",
			foundKafkaTopic.Name, foundKafkaTopic.Namespace, foundKafkaTopic.Spec.Name)
		return field.Invalid(field.NewPath("spec").Child("name"), foundKafkaTopic.Spec.Name, logMsg), nil
	}
	return nil, nil
}