diff --git a/operator/api/v1alpha1/flowtest_types.go b/operator/api/v1alpha1/flowtest_types.go index e67538e..f1ffed0 100644 --- a/operator/api/v1alpha1/flowtest_types.go +++ b/operator/api/v1alpha1/flowtest_types.go @@ -32,16 +32,20 @@ type FlowTestSpec struct { // FlowTestStatus defines the observed state of FlowTest type FlowTestStatus struct { - FailedMatch flowv1beta1.Match `json:"failedMatch"` - FailedFilter flowv1beta1.Filter `json:"failedFilter"` - // +kubebuilder:validation:Enum=Created;Running;Completed + // +nullable + FailedMatches []flowv1beta1.Match `json:"failedMatches"` + // +nullable + FailedFilters []flowv1beta1.Filter `json:"failedFilters"` + // +kubebuilder:default:="Created" + // +kubebuilder:validation:Enum=Created;Running;Completed;Error Status FlowStatus `json:"status"` } //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// +kubebuilder:printcolumn:JSONPath=".status.simulationPod.name",name="SimulationPod",type="string" -// +kubebuilder:printcolumn:JSONPath=".status.simulationFlow.name",name="SimulationFlow",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.referencePod.name",name="Reference Pod",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.referenceFlow.name",name="Reference Flow",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" // FlowTest is the Schema for the flowtests API type FlowTest struct { diff --git a/operator/api/v1alpha1/reference_types.go b/operator/api/v1alpha1/reference_types.go index f2771d1..7543d3a 100644 --- a/operator/api/v1alpha1/reference_types.go +++ b/operator/api/v1alpha1/reference_types.go @@ -10,9 +10,8 @@ type ReferenceObject struct { type FlowStatus string const ( - Created FlowStatus = "Created" - Running FlowStatus = "Running" - Skipped FlowStatus = "Skipped" - Failed FlowStatus = "Failed" - Passed FlowStatus = "Passed" + Created FlowStatus = "Created" + Running FlowStatus = "Running" + Completed FlowStatus = "Completed" + Error FlowStatus = "Error" ) diff --git a/operator/api/v1alpha1/zz_generated.deepcopy.go b/operator/api/v1alpha1/zz_generated.deepcopy.go index 4a34ed4..1fceb4a 100644 --- a/operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/operator/api/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + "github.com/banzaicloud/logging-operator/pkg/sdk/api/v1beta1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -108,8 +109,20 @@ func (in *FlowTestSpec) DeepCopy() *FlowTestSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowTestStatus) DeepCopyInto(out *FlowTestStatus) { *out = *in - in.FailedMatch.DeepCopyInto(&out.FailedMatch) - in.FailedFilter.DeepCopyInto(&out.FailedFilter) + if in.FailedMatches != nil { + in, out := &in.FailedMatches, &out.FailedMatches + *out = make([]v1beta1.Match, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailedFilters != nil { + in, out := &in.FailedFilters, &out.FailedFilters + *out = make([]v1beta1.Filter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowTestStatus. 
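The status change above is worth calling out: the single-valued `FailedMatch`/`FailedFilter` fields become the slice-valued `FailedMatches`/`FailedFilters`, so a test can report every match and filter that failed rather than only the last one, and the lifecycle enum gains a default (`Created`) and an `Error` state. A minimal sketch of how a reconciler might use the new fields — the helper name `recordFailedFilter` is hypothetical (not part of this PR), and the import aliases are the ones used in `flowtest_controller.go` below:

```go
// Sketch only: accumulate a failing filter on the status and persist it.
// recordFailedFilter is an illustrative helper, not part of this change set.
func (r *FlowTestReconciler) recordFailedFilter(ctx context.Context, flowTest *loggingplumberv1alpha1.FlowTest, filter flowv1beta1.Filter) error {
	// Append instead of overwrite: the old single-valued FailedFilter field
	// could only ever record one failure.
	flowTest.Status.FailedFilters = append(flowTest.Status.FailedFilters, filter)
	// Write through the status subresource enabled by +kubebuilder:subresource:status.
	return r.Status().Update(ctx, flowTest)
}
```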
diff --git a/operator/config/crd/bases/loggingplumber.isala.me_flowtests.yaml b/operator/config/crd/bases/loggingplumber.isala.me_flowtests.yaml index 0c82662..881e8df 100644 --- a/operator/config/crd/bases/loggingplumber.isala.me_flowtests.yaml +++ b/operator/config/crd/bases/loggingplumber.isala.me_flowtests.yaml @@ -17,11 +17,14 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .status.simulationPod.name - name: SimulationPod + - jsonPath: .spec.referencePod.name + name: Reference Pod type: string - - jsonPath: .status.simulationFlow.name - name: SimulationFlow + - jsonPath: .spec.referenceFlow.name + name: Reference Flow + type: string + - jsonPath: .status.status + name: Status type: string name: v1alpha1 schema: @@ -81,658 +84,520 @@ spec: status: description: FlowTestStatus defines the observed state of FlowTest properties: - failedFilter: - description: Filter definition for FlowSpec - properties: - concat: - properties: - continuous_line_regexp: - description: The regexp to match continuous lines. This is - exclusive with n_lines. - type: string - flush_interval: - description: The number of seconds after which the last received - event log will be flushed. If specified 0, wait for next - line forever. - type: integer - keep_partial_key: - description: If true, keep partial_key in concatenated records - (default:False) - type: boolean - keep_partial_metadata: - description: If true, keep partial metadata - type: string - key: - description: Specify field name in the record to parse. If - you leave empty the Container Runtime default will be used. - type: string - multiline_end_regexp: - description: The regexp to match ending of multiline. This - is exclusive with n_lines. - type: string - multiline_start_regexp: - description: The regexp to match beginning of multiline. This - is exclusive with n_lines. - type: string - n_lines: - description: The number of lines. This is exclusive with multiline_start_regex. - type: integer - partial_key: - description: The field name that is the reference to concatenate - records - type: string - partial_value: - description: The value stored in the field specified by partial_key - that represent partial log - type: string - separator: - description: 'The separator of lines. (default: "\n")' - type: string - stream_identity_key: - description: The key to determine which stream an event belongs - to. - type: string - timeout_label: - description: The label name to handle events caused by timeout. - type: string - use_first_timestamp: - description: 'Use timestamp of first record when buffer is - flushed. (default: False)' - type: boolean - use_partial_metadata: - description: Use partial metadata to concatenate multiple - records - type: string - type: object - dedot: - properties: - de_dot_nested: - description: 'Will cause the plugin to recurse through nested - structures (hashes and arrays), and remove dots in those - key-names too.(default: false)' - type: boolean - de_dot_separator: - description: Separator (default:_) - type: string - type: object - detectExceptions: - properties: - languages: - description: 'Programming languages for which to detect exceptions. - (default: [])' - items: + failedFilters: + items: + description: Filter definition for FlowSpec + properties: + concat: + properties: + continuous_line_regexp: + description: The regexp to match continuous lines. This + is exclusive with n_lines. 
type: string - type: array - max_bytes: - description: 'Maximum number of bytes to flush (0 means no - limit) (default: 0)' - type: integer - max_lines: - description: 'Maximum number of lines to flush (0 means no - limit) (default: 1000)' - type: integer - message: - description: 'The field which contains the raw message text - in the input JSON data. (default: "")' - type: string - multiline_flush_interval: - description: 'The interval of flushing the buffer for multiline - format. (default: nil)' - type: string - remove_tag_prefix: - description: 'The prefix to be removed from the input tag - when outputting a record. (default: "")' - type: string - stream: - description: 'Separate log streams by this field in the input - JSON data. (default: "")' - type: string - type: object - enhanceK8s: - properties: - api_groups: - description: 'Kubernetes resources api groups (default: ["apps/v1", - "extensions/v1beta1"])' - items: + flush_interval: + description: The number of seconds after which the last + received event log will be flushed. If specified 0, wait + for next line forever. + type: integer + keep_partial_key: + description: If true, keep partial_key in concatenated records + (default:False) + type: boolean + keep_partial_metadata: + description: If true, keep partial metadata type: string - type: array - bearer_token_file: - description: 'Bearer token path (default: nil)' - type: string - ca_file: - description: 'Kubernetes API CA file (default: nil)' - properties: - mountFrom: - description: Refers to a secret value to be used through - a volume mount - properties: - secretKeyRef: - description: SecretKeySelector selects a key of a - Secret. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - type: object - value: - description: Refers to a non-secret value - type: string - valueFrom: - description: Refers to a secret value to be used directly - properties: - secretKeyRef: - description: SecretKeySelector selects a key of a - Secret. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - type: object - type: object - cache_refresh: - description: 'Cache refresh (default: 60*60)' - type: integer - cache_refresh_variation: - description: 'Cache refresh variation (default: 60*15)' - type: integer - cache_size: - description: 'Cache size (default: 1000)' - type: integer - cache_ttl: - description: 'Cache TTL (default: 60*60*2)' - type: integer - client_cert: - description: 'Kubernetes API Client certificate (default: - nil)' - properties: - mountFrom: - description: Refers to a secret value to be used through - a volume mount - properties: - secretKeyRef: - description: SecretKeySelector selects a key of a - Secret. 
- properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - type: object - value: - description: Refers to a non-secret value - type: string - valueFrom: - description: Refers to a secret value to be used directly - properties: - secretKeyRef: - description: SecretKeySelector selects a key of a - Secret. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - type: object - type: object - client_key: - description: '// Kubernetes API Client certificate key (default: - nil)' - properties: - mountFrom: - description: Refers to a secret value to be used through - a volume mount - properties: - secretKeyRef: - description: SecretKeySelector selects a key of a - Secret. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - type: object - value: - description: Refers to a non-secret value + key: + description: Specify field name in the record to parse. + If you leave empty the Container Runtime default will + be used. + type: string + multiline_end_regexp: + description: The regexp to match ending of multiline. This + is exclusive with n_lines. + type: string + multiline_start_regexp: + description: The regexp to match beginning of multiline. + This is exclusive with n_lines. + type: string + n_lines: + description: The number of lines. This is exclusive with + multiline_start_regex. + type: integer + partial_key: + description: The field name that is the reference to concatenate + records + type: string + partial_value: + description: The value stored in the field specified by + partial_key that represent partial log + type: string + separator: + description: 'The separator of lines. (default: "\n")' + type: string + stream_identity_key: + description: The key to determine which stream an event + belongs to. + type: string + timeout_label: + description: The label name to handle events caused by timeout. + type: string + use_first_timestamp: + description: 'Use timestamp of first record when buffer + is flushed. 
(default: False)' + type: boolean + use_partial_metadata: + description: Use partial metadata to concatenate multiple + records + type: string + type: object + dedot: + properties: + de_dot_nested: + description: 'Will cause the plugin to recurse through nested + structures (hashes and arrays), and remove dots in those + key-names too.(default: false)' + type: boolean + de_dot_separator: + description: Separator (default:_) + type: string + type: object + detectExceptions: + properties: + languages: + description: 'Programming languages for which to detect + exceptions. (default: [])' + items: type: string - valueFrom: - description: Refers to a secret value to be used directly - properties: - secretKeyRef: - description: SecretKeySelector selects a key of a - Secret. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - type: object - type: object - core_api_versions: - description: 'Kubernetes core API version (for different Kubernetes - versions) (default: [''v1''])' - items: + type: array + max_bytes: + description: 'Maximum number of bytes to flush (0 means + no limit) (default: 0)' + type: integer + max_lines: + description: 'Maximum number of lines to flush (0 means + no limit) (default: 1000)' + type: integer + message: + description: 'The field which contains the raw message text + in the input JSON data. (default: "")' type: string - type: array - data_type: - description: 'Sumologic data type (default: metrics)' - type: string - in_namespace_path: - description: 'parameters for read/write record (default: [''$.namespace''])' - items: + multiline_flush_interval: + description: 'The interval of flushing the buffer for multiline + format. (default: nil)' type: string - type: array - in_pod_path: - description: '(default: [''$.pod'',''$.pod_name''])' - items: + remove_tag_prefix: + description: 'The prefix to be removed from the input tag + when outputting a record. 
(default: "")' type: string - type: array - kubernetes_url: - description: 'Kubernetes API URL (default: nil)' - type: string - secret_dir: - description: 'Service account directory (default: /var/run/secrets/kubernetes.io/serviceaccount)' - type: string - ssl_partial_chain: - description: 'if `ca_file` is for an intermediate CA, or otherwise - we do not have the root CA and want to trust the intermediate - CA certs we do have, set this to `true` - this corresponds - to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN - (default: false)' - type: boolean - verify_ssl: - description: 'Verify SSL (default: true)' - type: boolean - type: object - geoip: - properties: - backend_library: - description: Specify backend library (geoip2_c, geoip, geoip2_compat) - type: string - geoip_2_database: - description: Specify optional geoip2 database (using bundled - GeoLite2-City.mmdb by default) - type: string - geoip_database: - description: Specify optional geoip database (using bundled - GeoLiteCity databse by default) - type: string - geoip_lookup_keys: - description: 'Specify one or more geoip lookup field which - has ip address (default: host)' - type: string - records: - description: 'Records are represented as maps: `key: value`' - items: - additionalProperties: + stream: + description: 'Separate log streams by this field in the + input JSON data. (default: "")' + type: string + type: object + enhanceK8s: + properties: + api_groups: + description: 'Kubernetes resources api groups (default: + ["apps/v1", "extensions/v1beta1"])' + items: type: string - description: Parameters inside record directives are considered - to be new key-value pairs - type: object - type: array - skip_adding_null_record: - description: To avoid get stacktrace error with `[null, null]` - array for elasticsearch. - type: boolean - type: object - grep: - properties: - and: - items: - description: Specify filtering rule. This directive contains - either `regexp` or `exclude` directive. - properties: - exclude: - items: - description: Specify filtering rule to reject events. - This directive contains two parameters. - properties: - key: - description: Specify field name in the record - to parse. - type: string - pattern: - description: Pattern expression to evaluate - type: string - required: - - key - - pattern - type: object - type: array - regexp: - items: - description: Specify filtering rule. This directive - contains two parameters. - properties: - key: - description: Specify field name in the record - to parse. - type: string - pattern: - description: Pattern expression to evaluate - type: string - required: - - key - - pattern - type: object - type: array - type: object - type: array - exclude: - items: - description: Specify filtering rule to reject events. This - directive contains two parameters. + type: array + bearer_token_file: + description: 'Bearer token path (default: nil)' + type: string + ca_file: + description: 'Kubernetes API CA file (default: nil)' properties: - key: - description: Specify field name in the record to parse. - type: string - pattern: - description: Pattern expression to evaluate + mountFrom: + description: Refers to a secret value to be used through + a volume mount + properties: + secretKeyRef: + description: SecretKeySelector selects a key of + a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + value: + description: Refers to a non-secret value type: string - required: - - key - - pattern - type: object - type: array - or: - items: - description: Specify filtering rule. This directive contains - either `regexp` or `exclude` directive. - properties: - exclude: - items: - description: Specify filtering rule to reject events. - This directive contains two parameters. - properties: - key: - description: Specify field name in the record - to parse. - type: string - pattern: - description: Pattern expression to evaluate - type: string - required: - - key - - pattern - type: object - type: array - regexp: - items: - description: Specify filtering rule. This directive - contains two parameters. - properties: - key: - description: Specify field name in the record - to parse. - type: string - pattern: - description: Pattern expression to evaluate - type: string - required: - - key - - pattern - type: object - type: array + valueFrom: + description: Refers to a secret value to be used directly + properties: + secretKeyRef: + description: SecretKeySelector selects a key of + a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object type: object - type: array - regexp: - items: - description: Specify filtering rule. This directive contains - two parameters. + cache_refresh: + description: 'Cache refresh (default: 60*60)' + type: integer + cache_refresh_variation: + description: 'Cache refresh variation (default: 60*15)' + type: integer + cache_size: + description: 'Cache size (default: 1000)' + type: integer + cache_ttl: + description: 'Cache TTL (default: 60*60*2)' + type: integer + client_cert: + description: 'Kubernetes API Client certificate (default: + nil)' properties: - key: - description: Specify field name in the record to parse. - type: string - pattern: - description: Pattern expression to evaluate + mountFrom: + description: Refers to a secret value to be used through + a volume mount + properties: + secretKeyRef: + description: SecretKeySelector selects a key of + a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + value: + description: Refers to a non-secret value type: string - required: - - key - - pattern + valueFrom: + description: Refers to a secret value to be used directly + properties: + secretKeyRef: + description: SecretKeySelector selects a key of + a Secret. 
+ properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object type: object - type: array - type: object - parser: - properties: - emit_invalid_record_to_error: - description: 'Emit invalid record to @ERROR label. Invalid - cases are: key not exist, format is not matched, unexpected - error' - type: boolean - hash_value_field: - description: Store parsed values as a hash value in a field. - type: string - inject_key_prefix: - description: Store parsed values with specified key name prefix. - type: string - key_name: - description: Specify field name in the record to parse. If - you leave empty the Container Runtime default will be used. - type: string - parse: - properties: - delimiter: - description: 'Only available when using type: ltsv (default: - "\t")' - type: string - delimiter_pattern: - description: 'Only available when using type: ltsv' - type: string - estimate_current_event: - description: If true, use Fluent::EventTime.now(current - time) as a timestamp when time_key is specified. - type: boolean - expression: - description: Regexp expression to evaluate - type: string - format: - description: 'Only available when using type: multi_format' - type: string - format_firstline: - description: 'Only available when using type: multi_format' - type: string - keep_time_key: - description: If true, keep time field in the record. - type: boolean - label_delimiter: - description: 'Only available when using type: ltsv (default: - ":")' - type: string - local_time: - description: 'Ff true, use local time. Otherwise, UTC - is used. This is exclusive with utc. (default: true)' - type: boolean - multiline: - description: The multiline parser plugin parses multiline - logs. - items: + client_key: + description: '// Kubernetes API Client certificate key (default: + nil)' + properties: + mountFrom: + description: Refers to a secret value to be used through + a volume mount + properties: + secretKeyRef: + description: SecretKeySelector selects a key of + a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + value: + description: Refers to a non-secret value type: string - type: array - null_empty_string: - description: If true, empty string field is replaced with - nil - type: boolean - null_value_pattern: - description: ' Specify null value pattern.' - type: string - patterns: - description: 'Only available when using type: multi_format' - items: + valueFrom: + description: Refers to a secret value to be used directly properties: - estimate_current_event: - description: If true, use Fluent::EventTime.now(current - time) as a timestamp when time_key is specified. 
- type: boolean - expression: - description: Regexp expression to evaluate - type: string - format: - description: 'Only available when using type: multi_format' - type: string - keep_time_key: - description: If true, keep time field in the record. - type: boolean - local_time: - description: 'Ff true, use local time. Otherwise, - UTC is used. This is exclusive with utc. (default: - true)' - type: boolean - null_empty_string: - description: If true, empty string field is replaced - with nil - type: boolean - null_value_pattern: - description: ' Specify null value pattern.' - type: string - time_format: - description: Process value using specified format. - This is available only when time_type is string - type: string - time_key: - description: Specify time field for event time. - If the event doesn't have this field, current - time is used. - type: string - time_type: - description: 'Parse/format value according to this - type available values: float, unixtime, string - (default: string)' - type: string - timezone: - description: 'Use specified timezone. one can parse/format - the time value in the specified timezone. (default: - nil)' - type: string - type: - description: 'Parse type: apache2, apache_error, - nginx, syslog, csv, tsv, ltsv, json, multiline, - none, logfmt' - type: string - types: - description: 'Types casting the fields to proper - types example: field1:type, field2:type' - type: string - utc: - description: 'If true, use UTC. Otherwise, local - time is used. This is exclusive with localtime - (default: false)' - type: boolean + secretKeyRef: + description: SecretKeySelector selects a key of + a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object type: object - type: array - time_format: - description: Process value using specified format. This - is available only when time_type is string - type: string - time_key: - description: Specify time field for event time. If the - event doesn't have this field, current time is used. - type: string - time_type: - description: 'Parse/format value according to this type - available values: float, unixtime, string (default: - string)' - type: string - timezone: - description: 'Use specified timezone. one can parse/format - the time value in the specified timezone. (default: - nil)' + type: object + core_api_versions: + description: 'Kubernetes core API version (for different + Kubernetes versions) (default: [''v1''])' + items: type: string - type: - description: 'Parse type: apache2, apache_error, nginx, - syslog, csv, tsv, ltsv, json, multiline, none, logfmt' + type: array + data_type: + description: 'Sumologic data type (default: metrics)' + type: string + in_namespace_path: + description: 'parameters for read/write record (default: + [''$.namespace''])' + items: type: string - types: - description: 'Types casting the fields to proper types - example: field1:type, field2:type' + type: array + in_pod_path: + description: '(default: [''$.pod'',''$.pod_name''])' + items: type: string - utc: - description: 'If true, use UTC. Otherwise, local time - is used. 
This is exclusive with localtime (default: - false)' - type: boolean - type: object - parsers: - description: Deprecated, use `parse` instead - items: + type: array + kubernetes_url: + description: 'Kubernetes API URL (default: nil)' + type: string + secret_dir: + description: 'Service account directory (default: /var/run/secrets/kubernetes.io/serviceaccount)' + type: string + ssl_partial_chain: + description: 'if `ca_file` is for an intermediate CA, or + otherwise we do not have the root CA and want to trust + the intermediate CA certs we do have, set this to `true` + - this corresponds to the openssl s_client -partial_chain + flag and X509_V_FLAG_PARTIAL_CHAIN (default: false)' + type: boolean + verify_ssl: + description: 'Verify SSL (default: true)' + type: boolean + type: object + geoip: + properties: + backend_library: + description: Specify backend library (geoip2_c, geoip, geoip2_compat) + type: string + geoip_2_database: + description: Specify optional geoip2 database (using bundled + GeoLite2-City.mmdb by default) + type: string + geoip_database: + description: Specify optional geoip database (using bundled + GeoLiteCity databse by default) + type: string + geoip_lookup_keys: + description: 'Specify one or more geoip lookup field which + has ip address (default: host)' + type: string + records: + description: 'Records are represented as maps: `key: value`' + items: + additionalProperties: + type: string + description: Parameters inside record directives are considered + to be new key-value pairs + type: object + type: array + skip_adding_null_record: + description: To avoid get stacktrace error with `[null, + null]` array for elasticsearch. + type: boolean + type: object + grep: + properties: + and: + items: + description: Specify filtering rule. This directive contains + either `regexp` or `exclude` directive. + properties: + exclude: + items: + description: Specify filtering rule to reject events. + This directive contains two parameters. + properties: + key: + description: Specify field name in the record + to parse. + type: string + pattern: + description: Pattern expression to evaluate + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + description: Specify filtering rule. This directive + contains two parameters. + properties: + key: + description: Specify field name in the record + to parse. + type: string + pattern: + description: Pattern expression to evaluate + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + exclude: + items: + description: Specify filtering rule to reject events. + This directive contains two parameters. + properties: + key: + description: Specify field name in the record to parse. + type: string + pattern: + description: Pattern expression to evaluate + type: string + required: + - key + - pattern + type: object + type: array + or: + items: + description: Specify filtering rule. This directive contains + either `regexp` or `exclude` directive. + properties: + exclude: + items: + description: Specify filtering rule to reject events. + This directive contains two parameters. + properties: + key: + description: Specify field name in the record + to parse. + type: string + pattern: + description: Pattern expression to evaluate + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + description: Specify filtering rule. This directive + contains two parameters. 
+ properties: + key: + description: Specify field name in the record + to parse. + type: string + pattern: + description: Pattern expression to evaluate + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + regexp: + items: + description: Specify filtering rule. This directive contains + two parameters. + properties: + key: + description: Specify field name in the record to parse. + type: string + pattern: + description: Pattern expression to evaluate + type: string + required: + - key + - pattern + type: object + type: array + type: object + parser: + properties: + emit_invalid_record_to_error: + description: 'Emit invalid record to @ERROR label. Invalid + cases are: key not exist, format is not matched, unexpected + error' + type: boolean + hash_value_field: + description: Store parsed values as a hash value in a field. + type: string + inject_key_prefix: + description: Store parsed values with specified key name + prefix. + type: string + key_name: + description: Specify field name in the record to parse. + If you leave empty the Container Runtime default will + be used. + type: string + parse: properties: delimiter: description: 'Only available when using type: ltsv (default: @@ -875,311 +740,466 @@ spec: false)' type: boolean type: object - type: array - remove_key_name_field: - description: Remove key_name field when parsing is succeeded - type: boolean - replace_invalid_sequence: - description: If true, invalid string is replaced with safe - characters and re-parse it. - type: boolean - reserve_data: - description: Keep original key-value pair in parsed result. - type: boolean - reserve_time: - description: Keep original event time in parsed result. - type: boolean - type: object - prometheus: - properties: - labels: - additionalProperties: - type: string - type: object - metrics: - items: - properties: - buckets: - description: Buckets of record for instrumentation - type: string - desc: - description: Description of metric - type: string - key: - description: Key name of record for instrumentation. - type: string - labels: - additionalProperties: + parsers: + description: Deprecated, use `parse` instead + items: + properties: + delimiter: + description: 'Only available when using type: ltsv + (default: "\t")' type: string - description: Additional labels for this metric - type: object - name: - description: Metrics name - type: string - type: - description: Metrics type [counter](https://github.com/fluent/fluent-plugin-prometheus#counter-type), - [gauge](https://github.com/fluent/fluent-plugin-prometheus#gauge-type), - [summary](https://github.com/fluent/fluent-plugin-prometheus#summary-type), - [histogram](https://github.com/fluent/fluent-plugin-prometheus#histogram-type) - type: string - required: - - desc - - name - - type - type: object - type: array - type: object - record_modifier: - properties: - char_encoding: - description: Fluentd including some plugins treats logs as - a BINARY by default to forward. To overide that, use a target - encoding or a from:to encoding here. - type: string - prepare_value: - description: Prepare values for filtering in configure phase. - Prepared values can be used in . You can write any - ruby code. 
- type: string - records: - description: 'Add records docs at: https://github.com/repeatedly/fluent-plugin-record-modifier - Records are represented as maps: `key: value`' - items: + delimiter_pattern: + description: 'Only available when using type: ltsv' + type: string + estimate_current_event: + description: If true, use Fluent::EventTime.now(current + time) as a timestamp when time_key is specified. + type: boolean + expression: + description: Regexp expression to evaluate + type: string + format: + description: 'Only available when using type: multi_format' + type: string + format_firstline: + description: 'Only available when using type: multi_format' + type: string + keep_time_key: + description: If true, keep time field in the record. + type: boolean + label_delimiter: + description: 'Only available when using type: ltsv + (default: ":")' + type: string + local_time: + description: 'Ff true, use local time. Otherwise, + UTC is used. This is exclusive with utc. (default: + true)' + type: boolean + multiline: + description: The multiline parser plugin parses multiline + logs. + items: + type: string + type: array + null_empty_string: + description: If true, empty string field is replaced + with nil + type: boolean + null_value_pattern: + description: ' Specify null value pattern.' + type: string + patterns: + description: 'Only available when using type: multi_format' + items: + properties: + estimate_current_event: + description: If true, use Fluent::EventTime.now(current + time) as a timestamp when time_key is specified. + type: boolean + expression: + description: Regexp expression to evaluate + type: string + format: + description: 'Only available when using type: + multi_format' + type: string + keep_time_key: + description: If true, keep time field in the + record. + type: boolean + local_time: + description: 'Ff true, use local time. Otherwise, + UTC is used. This is exclusive with utc. (default: + true)' + type: boolean + null_empty_string: + description: If true, empty string field is + replaced with nil + type: boolean + null_value_pattern: + description: ' Specify null value pattern.' + type: string + time_format: + description: Process value using specified format. + This is available only when time_type is string + type: string + time_key: + description: Specify time field for event time. + If the event doesn't have this field, current + time is used. + type: string + time_type: + description: 'Parse/format value according to + this type available values: float, unixtime, + string (default: string)' + type: string + timezone: + description: 'Use specified timezone. one can + parse/format the time value in the specified + timezone. (default: nil)' + type: string + type: + description: 'Parse type: apache2, apache_error, + nginx, syslog, csv, tsv, ltsv, json, multiline, + none, logfmt' + type: string + types: + description: 'Types casting the fields to proper + types example: field1:type, field2:type' + type: string + utc: + description: 'If true, use UTC. Otherwise, local + time is used. This is exclusive with localtime + (default: false)' + type: boolean + type: object + type: array + time_format: + description: Process value using specified format. + This is available only when time_type is string + type: string + time_key: + description: Specify time field for event time. If + the event doesn't have this field, current time + is used. 
+ type: string + time_type: + description: 'Parse/format value according to this + type available values: float, unixtime, string (default: + string)' + type: string + timezone: + description: 'Use specified timezone. one can parse/format + the time value in the specified timezone. (default: + nil)' + type: string + type: + description: 'Parse type: apache2, apache_error, nginx, + syslog, csv, tsv, ltsv, json, multiline, none, logfmt' + type: string + types: + description: 'Types casting the fields to proper types + example: field1:type, field2:type' + type: string + utc: + description: 'If true, use UTC. Otherwise, local time + is used. This is exclusive with localtime (default: + false)' + type: boolean + type: object + type: array + remove_key_name_field: + description: Remove key_name field when parsing is succeeded + type: boolean + replace_invalid_sequence: + description: If true, invalid string is replaced with safe + characters and re-parse it. + type: boolean + reserve_data: + description: Keep original key-value pair in parsed result. + type: boolean + reserve_time: + description: Keep original event time in parsed result. + type: boolean + type: object + prometheus: + properties: + labels: additionalProperties: type: string - description: Parameters inside record directives are considered - to be new key-value pairs type: object - type: array - remove_keys: - description: A comma-delimited list of keys to delete - type: string - replaces: - description: Replace specific value for keys - items: - description: Specify replace rule. This directive contains - three parameters. - properties: - expression: - description: Regular expression - type: string - key: - description: Key to search for + metrics: + items: + properties: + buckets: + description: Buckets of record for instrumentation + type: string + desc: + description: Description of metric + type: string + key: + description: Key name of record for instrumentation. + type: string + labels: + additionalProperties: + type: string + description: Additional labels for this metric + type: object + name: + description: Metrics name + type: string + type: + description: Metrics type [counter](https://github.com/fluent/fluent-plugin-prometheus#counter-type), + [gauge](https://github.com/fluent/fluent-plugin-prometheus#gauge-type), + [summary](https://github.com/fluent/fluent-plugin-prometheus#summary-type), + [histogram](https://github.com/fluent/fluent-plugin-prometheus#histogram-type) + type: string + required: + - desc + - name + - type + type: object + type: array + type: object + record_modifier: + properties: + char_encoding: + description: Fluentd including some plugins treats logs + as a BINARY by default to forward. To overide that, use + a target encoding or a from:to encoding here. + type: string + prepare_value: + description: Prepare values for filtering in configure phase. + Prepared values can be used in . You can write + any ruby code. + type: string + records: + description: 'Add records docs at: https://github.com/repeatedly/fluent-plugin-record-modifier + Records are represented as maps: `key: value`' + items: + additionalProperties: type: string - replace: - description: Value to replace with + description: Parameters inside record directives are considered + to be new key-value pairs + type: object + type: array + remove_keys: + description: A comma-delimited list of keys to delete + type: string + replaces: + description: Replace specific value for keys + items: + description: Specify replace rule. 
This directive contains + three parameters. + properties: + expression: + description: Regular expression + type: string + key: + description: Key to search for + type: string + replace: + description: Value to replace with + type: string + required: + - expression + - key + - replace + type: object + type: array + whitelist_keys: + description: This is exclusive with remove_keys + type: string + type: object + record_transformer: + properties: + auto_typecast: + description: 'Use original value type. (default: true)' + type: boolean + enable_ruby: + description: 'When set to true, the full Ruby syntax is + enabled in the ${...} expression. (default: false)' + type: boolean + keep_keys: + description: A comma-delimited list of keys to keep. + type: string + records: + description: 'Add records docs at: https://docs.fluentd.org/filter/record_transformer + Records are represented as maps: `key: value`' + items: + additionalProperties: type: string - required: - - expression - - key - - replace - type: object - type: array - whitelist_keys: - description: This is exclusive with remove_keys - type: string - type: object - record_transformer: - properties: - auto_typecast: - description: 'Use original value type. (default: true)' - type: boolean - enable_ruby: - description: 'When set to true, the full Ruby syntax is enabled - in the ${...} expression. (default: false)' - type: boolean - keep_keys: - description: A comma-delimited list of keys to keep. - type: string - records: - description: 'Add records docs at: https://docs.fluentd.org/filter/record_transformer - Records are represented as maps: `key: value`' - items: - additionalProperties: - type: string - description: Parameters inside record directives are considered - to be new key-value pairs - type: object - type: array - remove_keys: - description: A comma-delimited list of keys to delete - type: string - renew_record: - description: 'Create new Hash to transform incoming data (default: - false)' - type: boolean - renew_time_key: - description: Specify field name of the record to overwrite - the time of events. Its value must be unix time. - type: string - type: object - stdout: - properties: - output_type: - description: This is the option of stdout format. 
- type: string - type: object - sumologic: - properties: - collector_key_name: - description: 'CollectorKey Name (default: "_collector")' - type: string - collector_value: - description: 'Collector Value (default: "undefined")' - type: string - exclude_container_regex: - description: 'Exclude Container Regex (default: "")' - type: string - exclude_facility_regex: - description: 'Exclude Facility Regex (default: "")' - type: string - exclude_host_regex: - description: 'Exclude Host Regex (default: "")' - type: string - exclude_namespace_regex: - description: 'Exclude Namespace Regex (default: "")' - type: string - exclude_pod_regex: - description: 'Exclude Pod Regex (default: "")' - type: string - exclude_priority_regex: - description: 'Exclude Priority Regex (default: "")' - type: string - exclude_unit_regex: - description: 'Exclude Unit Regex (default: "")' - type: string - log_format: - description: 'Log Format (default: json)' - type: string - source_category: - description: 'Source Category (default: "%{namespace}/%{pod_name}")' - type: string - source_category_key_name: - description: 'Source CategoryKey Name (default: "_sourceCategory")' - type: string - source_category_prefix: - description: 'Source Category Prefix (default: kubernetes/)' - type: string - source_category_replace_dash: - description: 'Source Category Replace Dash (default: "/")' - type: string - source_host: - description: 'Source Host (default: "")' - type: string - source_host_key_name: - description: 'Source HostKey Name (default: "_sourceHost")' - type: string - source_name: - description: 'Source Name (default: "%{namespace}.%{pod}.%{container}")' - type: string - source_name_key_name: - description: 'Source NameKey Name (default: "_sourceName")' - type: string - tracing_annotation_prefix: - description: 'Tracing Annotation Prefix (default: "pod_annotation_")' - type: string - tracing_container_name: - description: 'Tracing Container Name (default: "container_name")' - type: string - tracing_format: - description: 'Tracing Format (default: false)' - type: boolean - tracing_host: - description: 'Tracing Host (default: "hostname")' - type: string - tracing_label_prefix: - description: 'Tracing Label Prefix (default: "pod_label_")' - type: string - tracing_namespace: - description: 'Tracing Namespace (default: "namespace")' - type: string - tracing_pod: - description: 'Tracing Pod (default: "pod")' - type: string - tracing_pod_id: - description: 'Tracing Pod ID (default: "pod_id")' - type: string - type: object - tag_normaliser: - properties: - format: - description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser) - type: string - type: object - throttle: - properties: - group_bucket_limit: - description: 'Maximum number logs allowed per groups over - the period of group_bucket_period_s (default: 6000)' - type: integer - group_bucket_period_s: - description: 'This is the period of of time over which group_bucket_limit - applies (default: 60)' - type: integer - group_drop_logs: - description: 'When a group reaches its limit, logs will be - dropped from further processing if this value is true (default: - true)' - type: boolean - group_key: - description: 'Used to group logs. Groups are rate limited - independently (default: kubernetes.container_name)' - type: string - group_reset_rate_s: - description: 'After a group has exceeded its bucket limit, - logs are dropped until the rate per second falls below or - equal to group_reset_rate_s. 
(default: group_bucket_limit/group_bucket_period_s)' - type: integer - group_warning_delay_s: - description: 'When a group reaches its limit and as long as - it is not reset, a warning message with the current log - rate of the group is emitted repeatedly. This is the delay - between every repetition. (default: 10 seconds)' - type: integer - type: object - type: object - failedMatch: - properties: - exclude: - properties: - container_names: - items: + description: Parameters inside record directives are considered + to be new key-value pairs + type: object + type: array + remove_keys: + description: A comma-delimited list of keys to delete type: string - type: array - hosts: - items: + renew_record: + description: 'Create new Hash to transform incoming data + (default: false)' + type: boolean + renew_time_key: + description: Specify field name of the record to overwrite + the time of events. Its value must be unix time. type: string - type: array - labels: - additionalProperties: + type: object + stdout: + properties: + output_type: + description: This is the option of stdout format. type: string - type: object - type: object - select: - properties: - container_names: - items: + type: object + sumologic: + properties: + collector_key_name: + description: 'CollectorKey Name (default: "_collector")' type: string - type: array - hosts: - items: + collector_value: + description: 'Collector Value (default: "undefined")' type: string - type: array - labels: - additionalProperties: + exclude_container_regex: + description: 'Exclude Container Regex (default: "")' type: string - type: object - type: object - type: object + exclude_facility_regex: + description: 'Exclude Facility Regex (default: "")' + type: string + exclude_host_regex: + description: 'Exclude Host Regex (default: "")' + type: string + exclude_namespace_regex: + description: 'Exclude Namespace Regex (default: "")' + type: string + exclude_pod_regex: + description: 'Exclude Pod Regex (default: "")' + type: string + exclude_priority_regex: + description: 'Exclude Priority Regex (default: "")' + type: string + exclude_unit_regex: + description: 'Exclude Unit Regex (default: "")' + type: string + log_format: + description: 'Log Format (default: json)' + type: string + source_category: + description: 'Source Category (default: "%{namespace}/%{pod_name}")' + type: string + source_category_key_name: + description: 'Source CategoryKey Name (default: "_sourceCategory")' + type: string + source_category_prefix: + description: 'Source Category Prefix (default: kubernetes/)' + type: string + source_category_replace_dash: + description: 'Source Category Replace Dash (default: "/")' + type: string + source_host: + description: 'Source Host (default: "")' + type: string + source_host_key_name: + description: 'Source HostKey Name (default: "_sourceHost")' + type: string + source_name: + description: 'Source Name (default: "%{namespace}.%{pod}.%{container}")' + type: string + source_name_key_name: + description: 'Source NameKey Name (default: "_sourceName")' + type: string + tracing_annotation_prefix: + description: 'Tracing Annotation Prefix (default: "pod_annotation_")' + type: string + tracing_container_name: + description: 'Tracing Container Name (default: "container_name")' + type: string + tracing_format: + description: 'Tracing Format (default: false)' + type: boolean + tracing_host: + description: 'Tracing Host (default: "hostname")' + type: string + tracing_label_prefix: + description: 'Tracing Label Prefix (default: "pod_label_")' + type: 
string + tracing_namespace: + description: 'Tracing Namespace (default: "namespace")' + type: string + tracing_pod: + description: 'Tracing Pod (default: "pod")' + type: string + tracing_pod_id: + description: 'Tracing Pod ID (default: "pod_id")' + type: string + type: object + tag_normaliser: + properties: + format: + description: Re-Tag log messages info at [github](https://github.com/banzaicloud/fluent-plugin-tag-normaliser) + type: string + type: object + throttle: + properties: + group_bucket_limit: + description: 'Maximum number logs allowed per groups over + the period of group_bucket_period_s (default: 6000)' + type: integer + group_bucket_period_s: + description: 'This is the period of of time over which group_bucket_limit + applies (default: 60)' + type: integer + group_drop_logs: + description: 'When a group reaches its limit, logs will + be dropped from further processing if this value is true + (default: true)' + type: boolean + group_key: + description: 'Used to group logs. Groups are rate limited + independently (default: kubernetes.container_name)' + type: string + group_reset_rate_s: + description: 'After a group has exceeded its bucket limit, + logs are dropped until the rate per second falls below + or equal to group_reset_rate_s. (default: group_bucket_limit/group_bucket_period_s)' + type: integer + group_warning_delay_s: + description: 'When a group reaches its limit and as long + as it is not reset, a warning message with the current + log rate of the group is emitted repeatedly. This is the + delay between every repetition. (default: 10 seconds)' + type: integer + type: object + type: object + nullable: true + type: array + failedMatches: + items: + properties: + exclude: + properties: + container_names: + items: + type: string + type: array + hosts: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + type: object + select: + properties: + container_names: + items: + type: string + type: array + hosts: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + type: object + type: object + nullable: true + type: array status: + default: Created enum: - Created - Running - Completed + - Error type: string required: - - failedFilter - - failedMatch + - failedFilters + - failedMatches - status type: object type: object diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml index 2bcd3ee..5e793dd 100644 --- a/operator/config/manager/kustomization.yaml +++ b/operator/config/manager/kustomization.yaml @@ -5,6 +5,12 @@ generatorOptions: disableNameSuffixHash: true configMapGenerator: -- name: manager-config - files: +- files: - controller_manager_config.yaml + name: manager-config +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: controller + newTag: latest diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml index 7d446ea..21c5be5 100644 --- a/operator/config/rbac/role.yaml +++ b/operator/config/rbac/role.yaml @@ -6,6 +6,18 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - logging.banzaicloud.io + resources: + - clusterflows + - clusteroutputs + - flows + - outputs + verbs: + - create + - delete + - get + - list - apiGroups: - loggingplumber.isala.me resources: diff --git a/operator/config/samples/loggingplumber_v1alpha1_flowtest.yaml b/operator/config/samples/loggingplumber_v1alpha1_flowtest.yaml index 3dcd952..6e616c4 
100644 --- a/operator/config/samples/loggingplumber_v1alpha1_flowtest.yaml +++ b/operator/config/samples/loggingplumber_v1alpha1_flowtest.yaml @@ -2,17 +2,22 @@ apiVersion: loggingplumber.isala.me/v1alpha1 kind: FlowTest metadata: name: flowtest-sample + labels: + app.kubernetes.io/name: pod-simulation + app.kubernetes.io/managed-by: rancher-logging-explorer + app.kubernetes.io/created-by: logging-plumber + loggingplumber.isala.me/flowtest: flowtest-sample spec: referencePod: kind: Pod - name: test-pod + name: busybox-echo namespace: default referenceFlow: - kind: ClusterFlow - name: all-logs + kind: Flow + name: busybox-echo namespace: default sentMessages: - "[2021-06-10T11:50:06Z] @DEBUG Tam ipsae consuetudo infelix adtendi contexo mansuefecisti diutius re. 1373 ::0.403911" - "[2021-06-10T11:50:07Z] @WARNING Ne hi flagitantur alienam neglecta. 1374 ::0.474177" - "[2021-06-10T11:50:08Z] @INFO Amo ideoque die se at, caro aer, ad cor. 1375 ::0.263548" - - "[2021-06-10T11:50:09Z] @INFO Se contexo servis inpiis erogo, diligit ita significaret eosdem. 1376 ::0.405282" \ No newline at end of file + - "[2021-06-10T11:50:09Z] @INFO Se contexo servis inpiis erogo, diligit ita significaret eosdem. 1376 ::0.405282" diff --git a/operator/config/samples/simulations.yaml b/operator/config/samples/simulations.yaml new file mode 100644 index 0000000..c2f63b9 --- /dev/null +++ b/operator/config/samples/simulations.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox-echo + labels: + loggingplumber.isala.me/test: simulations +spec: + containers: + - name: busybox + image: busybox + command: ["sh", "-c", "x=1; while [ $x -gt -1 ]; do echo \"{'count': '$(( x++ ))', 'date': '$(date)'}\" && sleep 5; done"] +--- +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: busybox-echo + labels: + loggingplumber.isala.me/test: simulations +spec: + localOutputRefs: + - busybox-echo + match: + - select: + labels: + loggingplumber.isala.me/test: invalid + - select: + labels: + loggingplumber.isala.me/test: simulations + filters: + - record_modifier: + records: + - foo: "bar" + - grep: + regexp: + - key: first + pattern: /^5\d\d$/ +--- +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: busybox-echo + labels: + loggingplumber.isala.me/test: simulations +spec: + http: + endpoint: "http://logging-plumber-log-aggregator.default.svc/busybox-echo/" + buffer: + flush_interval: 10s + flush_mode: interval +--- +apiVersion: v1 +kind: Service +metadata: + name: logging-plumber-log-aggregator + labels: + loggingplumber.isala.me/test: simulations +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: logging-plumber-log-aggregator + app.kubernetes.io/managed-by: rancher-logging-explorer + app.kubernetes.io/created-by: logging-plumber + loggingplumber.isala.me/component: log-aggregator \ No newline at end of file diff --git a/operator/controllers/cleanup.go b/operator/controllers/cleanup.go new file mode 100644 index 0000000..23c76a0 --- /dev/null +++ b/operator/controllers/cleanup.go @@ -0,0 +1,74 @@ +package controllers + +import ( + "context" + "fmt" + flowv1beta1 "github.com/banzaicloud/logging-operator/pkg/sdk/api/v1beta1" + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func (r *FlowTestReconciler) cleanUpResources(ctx context.Context, flowTestName string) error { + logger := log.FromContext(ctx) + + 
matchingLabels := &client.MatchingLabels{"loggingplumber.isala.me/flowtest": flowTestName}
+
+	var podList v1.PodList
+	if err := r.List(ctx, &podList, matchingLabels); client.IgnoreNotFound(err) != nil {
+		logger.Error(err, fmt.Sprintf("failed to get provisioned %s", podList.Kind))
+		return err
+	}
+
+	for _, resource := range podList.Items {
+		if err := r.Delete(ctx, &resource); client.IgnoreNotFound(err) != nil {
+			logger.Error(err, fmt.Sprintf("failed to delete a provisioned %s", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+			return err
+		}
+		logger.V(1).Info(fmt.Sprintf("%s deleted", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+	}
+
+	var configMapList v1.ConfigMapList
+	if err := r.List(ctx, &configMapList, matchingLabels); client.IgnoreNotFound(err) != nil {
+		logger.Error(err, fmt.Sprintf("failed to get provisioned %s", configMapList.Kind))
+		return err
+	}
+
+	for _, resource := range configMapList.Items {
+		if err := r.Delete(ctx, &resource); client.IgnoreNotFound(err) != nil {
+			logger.Error(err, fmt.Sprintf("failed to delete a provisioned %s", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+			return err
+		}
+		logger.V(1).Info(fmt.Sprintf("%s deleted", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+	}
+
+	var flows flowv1beta1.FlowList
+	if err := r.List(ctx, &flows, matchingLabels); client.IgnoreNotFound(err) != nil {
+		logger.Error(err, fmt.Sprintf("failed to get provisioned %s", flows.Kind))
+		return err
+	}
+
+	for _, resource := range flows.Items {
+		if err := r.Delete(ctx, &resource); client.IgnoreNotFound(err) != nil {
+			logger.Error(err, fmt.Sprintf("failed to delete a provisioned %s", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+			return err
+		}
+		logger.V(1).Info(fmt.Sprintf("%s deleted", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+	}
+
+	var outputs flowv1beta1.OutputList
+	if err := r.List(ctx, &outputs, matchingLabels); client.IgnoreNotFound(err) != nil {
+		logger.Error(err, fmt.Sprintf("failed to get provisioned %s", outputs.Kind))
+		return err
+	}
+
+	for _, resource := range outputs.Items {
+		if err := r.Delete(ctx, &resource); client.IgnoreNotFound(err) != nil {
+			logger.Error(err, fmt.Sprintf("failed to delete a provisioned %s", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+			return err
+		}
+		logger.V(1).Info(fmt.Sprintf("%s deleted", resource.Kind), "uuid", resource.GetUID(), "name", resource.GetName())
+	}
+
+	return nil
+}
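Every object the operator provisions carries the `loggingplumber.isala.me/flowtest` label, so cleanup is a label-scoped list-and-delete per resource kind rather than a bookkeeping table. A minimal sketch of that pattern, assuming a controller-runtime client; `deleteProvisionedPods` is illustrative and not part of the patch above:

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteProvisionedPods removes every pod labelled with the given FlowTest
// name; the same list-and-delete shape repeats for ConfigMaps, Flows, and
// Outputs in cleanup.go above.
func deleteProvisionedPods(ctx context.Context, c client.Client, flowTestName string) error {
	selector := client.MatchingLabels{"loggingplumber.isala.me/flowtest": flowTestName}

	var pods corev1.PodList
	if err := c.List(ctx, &pods, selector); err != nil {
		return client.IgnoreNotFound(err)
	}
	for i := range pods.Items {
		// index into the slice instead of taking the loop variable's address
		if err := c.Delete(ctx, &pods.Items[i]); client.IgnoreNotFound(err) != nil {
			return err
		}
	}
	return nil
}
```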
diff --git a/operator/controllers/flowtest_controller.go b/operator/controllers/flowtest_controller.go
index 0b9854d..568fa82 100644
--- a/operator/controllers/flowtest_controller.go
+++ b/operator/controllers/flowtest_controller.go
@@ -18,8 +18,12 @@ package controllers
 
 import (
 	"context"
+	"fmt"
+	flowv1beta1 "github.com/banzaicloud/logging-operator/pkg/sdk/api/v1beta1"
 	loggingplumberv1alpha1 "github.com/mrsupiri/rancher-logging-explorer/api/v1alpha1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	"reflect"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
@@ -32,13 +36,13 @@ type FlowTestReconciler struct {
 	Scheme *runtime.Scheme
 }
 
+//+kubebuilder:rbac:groups=logging.banzaicloud.io,resources=flows;clusterflows;outputs;clusteroutputs,verbs=get;list;create;delete
 //+kubebuilder:rbac:groups=loggingplumber.isala.me,resources=flowtests,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=loggingplumber.isala.me,resources=flowtests/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=loggingplumber.isala.me,resources=flowtests/finalizers,verbs=update
 
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the FlowTest object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
+// It compares the state specified by the FlowTest object against the actual
+// cluster state, and then performs operations to make the cluster state
+// reflect the state specified by the user.
@@ -48,33 +52,65 @@ func (r *FlowTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	logger := log.FromContext(ctx)
 
+	logger.Info("Reconciling")
+
 	var flowTest loggingplumberv1alpha1.FlowTest
 	if err := r.Get(ctx, req.NamespacedName, &flowTest); err != nil {
-		return ctrl.Result{}, client.IgnoreNotFound(err)
+		if apierrors.IsNotFound(err) {
+			// the FlowTest is gone; tear down anything it provisioned
+			if err := r.cleanUpResources(ctx, req.Name); client.IgnoreNotFound(err) != nil {
+				return ctrl.Result{}, err
+			}
+		} else {
+			logger.Error(err, "failed to get the flowtest")
+		}
+		return ctrl.Result{Requeue: false}, client.IgnoreNotFound(err)
 	}
 
-	logger.Info("Reconciling")
-	//var referencePod v1.Pod
-	//if err := r.Get(ctx, types.NamespacedName{
-	//	Namespace: flowTest.Spec.ReferencePod.Namespace,
-	//	Name: flowTest.Spec.ReferencePod.Name,
-	//}, &referencePod); err != nil {
-	//	//if apierrors.IsNotFound(err) {
-	//	//	logger.V()
-	//	//}
-	//	//client.IgnoreNotFound()
-	//	return ctrl.Result{}, err
-	//}
-
-	//var referenceFlow flowv1beta1.Flow
-	//if err := r.Get(ctx, types.NamespacedName{
-	//	Namespace: flowTest.Spec.ReferenceFlow.Namespace,
-	//	Name: flowTest.Spec.ReferenceFlow.Name,
-	//}, &referenceFlow); err != nil {
-	//	return ctrl.Result{}, err
-	//}
-
-	return ctrl.Result{RequeueAfter: time.Second * 10}, nil
+	ctx = context.WithValue(ctx, "flowTest", flowTest)
+
+	if flowTest.Status.Status == "" {
+		flowTest.Status.Status = loggingplumberv1alpha1.Created
+		if err := r.Status().Update(ctx, &flowTest); err != nil {
+			logger.Error(err, "failed to update flowtest status")
+			return ctrl.Result{}, r.setErrorStatus(ctx, client.IgnoreNotFound(err))
+		}
+		return ctrl.Result{}, nil
+	}
+
+	if flowTest.Status.Status == loggingplumberv1alpha1.Created {
+		if err := r.provisionResource(ctx); err != nil {
+			return ctrl.Result{}, r.setErrorStatus(ctx, err)
+		}
+	}
+
+	if flowTest.Status.Status == loggingplumberv1alpha1.Completed {
+		if err := r.cleanUpResources(ctx, req.Name); client.IgnoreNotFound(err) != nil {
+			return ctrl.Result{}, r.setErrorStatus(ctx, err)
+		}
+		return ctrl.Result{Requeue: false}, nil
+	}
+
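+	// A FlowTest gets a fixed two-minute evaluation window measured from its
+	// creation timestamp; once the window lapses the test is marked Completed
+	// and whatever is still listed in failedMatches/failedFilters is reported
+	// as the failing configuration.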
+	if flowTest.Status.Status == loggingplumberv1alpha1.Running {
+		twoMinutesAfterCreation := flowTest.CreationTimestamp.Add(2 * time.Minute)
+		if time.Now().After(twoMinutesAfterCreation) {
+			flowTest.Status.Status = loggingplumberv1alpha1.Completed
+			if err := r.Status().Update(ctx, &flowTest); err != nil {
+				logger.Error(err, "failed to update flowtest status")
+				return ctrl.Result{}, r.setErrorStatus(ctx, client.IgnoreNotFound(err))
+			}
+			return ctrl.Result{}, nil
+		} else {
+			logger.V(1).Info("checking log indexes")
+			if err := r.checkForPassingFlowTest(ctx); err != nil {
+				return ctrl.Result{RequeueAfter: 30 * time.Second}, err
+			}
+		}
+	}
+
+	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
 }
 
 // SetupWithManager sets up the controller with the Manager.
@@ -83,3 +119,62 @@ func (r *FlowTestReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		For(&loggingplumberv1alpha1.FlowTest{}).
 		Complete(r)
 }
+
+func (r *FlowTestReconciler) checkForPassingFlowTest(ctx context.Context) error {
+	logger := log.FromContext(ctx)
+	flowTest := ctx.Value("flowTest").(loggingplumberv1alpha1.FlowTest)
+
+	var flows flowv1beta1.FlowList
+	if err := r.List(ctx, &flows, &client.MatchingLabels{"loggingplumber.isala.me/flowtest": flowTest.ObjectMeta.Name}); client.IgnoreNotFound(err) != nil {
+		logger.Error(err, fmt.Sprintf("failed to get provisioned %s", flows.Kind))
+		return err
+	}
+
+	for _, flow := range flows.Items {
+		passing, err := CheckIndex(ctx, flow.ObjectMeta.Name)
+		if err != nil {
+			return err
+		}
+		if passing {
+			logger.V(1).Info(fmt.Sprintf("flow %s is passing", flow.ObjectMeta.Name))
+			if err := r.Delete(ctx, &flow); err != nil {
+				logger.Error(err, "failed to delete a passing flow")
+				return err
+			}
+			for _, match := range flow.Spec.Match {
+				flowTest.Status.FailedMatches = removeMatchIfExists(flowTest.Status.FailedMatches, match)
+			}
+			for _, filter := range flow.Spec.Filters {
+				flowTest.Status.FailedFilters = removeFilterIfExists(flowTest.Status.FailedFilters, filter)
+			}
+			if err := r.Status().Update(ctx, &flowTest); err != nil {
+				logger.Error(err, "failed to update flowtest status")
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func removeMatchIfExists(matches []flowv1beta1.Match, match flowv1beta1.Match) []flowv1beta1.Match {
+	var failing []flowv1beta1.Match
+	for _, element := range matches {
+		if reflect.DeepEqual(match, element) {
+			continue
+		}
+		failing = append(failing, element)
+	}
+	return failing
+}
+
+func removeFilterIfExists(filters []flowv1beta1.Filter, filter flowv1beta1.Filter) []flowv1beta1.Filter {
+	var failing []flowv1beta1.Filter
+	for _, element := range filters {
+		if reflect.DeepEqual(filter, element) {
+			continue
+		}
+		failing = append(failing, element)
+	}
+	return failing
+}
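The status bookkeeping works by subtraction: provisionResource (below) seeds failedMatches/failedFilters with every statement from the reference flow, and each passing slice then removes the statements it carried via the reflect.DeepEqual helpers above. A toy illustration of that removal semantics; the `rule` type and its values are invented for the example:

```go
package sketch

import (
	"fmt"
	"reflect"
)

type rule struct{ Key, Pattern string }

// remove drops every element deep-equal to target, mirroring
// removeMatchIfExists/removeFilterIfExists above.
func remove(rules []rule, target rule) []rule {
	var remaining []rule
	for _, element := range rules {
		if reflect.DeepEqual(element, target) {
			continue
		}
		remaining = append(remaining, element)
	}
	return remaining
}

func main() {
	failed := []rule{{"first", `^5\d\d$`}, {"second", `^4\d\d$`}}
	failed = remove(failed, rule{"first", `^5\d\d$`})
	fmt.Println(failed) // [{second ^4\d\d$}]
}
```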
diff --git a/operator/controllers/provision.go b/operator/controllers/provision.go
new file mode 100644
index 0000000..029433c
--- /dev/null
+++ b/operator/controllers/provision.go
@@ -0,0 +1,252 @@
+package controllers
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	flowv1beta1 "github.com/banzaicloud/logging-operator/pkg/sdk/api/v1beta1"
+	loggingplumberv1alpha1 "github.com/mrsupiri/rancher-logging-explorer/api/v1alpha1"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+func (r *FlowTestReconciler) provisionResource(ctx context.Context) error {
+	logger := log.FromContext(ctx)
+	flowTest := ctx.Value("flowTest").(loggingplumberv1alpha1.FlowTest)
+
+	logOutput := new(bytes.Buffer)
+	for _, line := range flowTest.Spec.SentMessages {
+		_, _ = logOutput.WriteString(fmt.Sprintf("%s\n", line))
+	}
+
+	immutable := true
+	configMap := v1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-configmap", flowTest.Spec.ReferencePod.Name),
+			Namespace: flowTest.Spec.ReferencePod.Namespace,
+			Labels: map[string]string{
+				"app.kubernetes.io/name":                "pod-simulation",
+				"app.kubernetes.io/managed-by":          "rancher-logging-explorer",
+				"app.kubernetes.io/created-by":          "logging-plumber",
+				"loggingplumber.isala.me/flowtest-uuid": string(flowTest.ObjectMeta.UID),
+				"loggingplumber.isala.me/flowtest":      flowTest.ObjectMeta.Name,
+			},
+		},
+		Immutable:  &immutable,
+		BinaryData: map[string][]byte{"simulation.log": logOutput.Bytes()},
+	}
+
+	if err := r.Create(ctx, &configMap); err != nil {
+		logger.Error(err, "failed to create ConfigMap with simulation.log")
+		return err
+	}
+
+	logger.V(1).Info("deployed config map with simulation.log", "uuid", configMap.ObjectMeta.UID)
+
+	// TODO: set the name based on flowtest uuid
+	var referencePod v1.Pod
+	if err := r.Get(ctx, types.NamespacedName{
+		Namespace: flowTest.Spec.ReferencePod.Namespace,
+		Name:      flowTest.Spec.ReferencePod.Name,
+	}, &referencePod); err != nil {
+		return err
+	}
+
+	simulationPod := v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Pod",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-simulation", flowTest.UID),
+			Namespace: flowTest.Spec.ReferencePod.Namespace,
+			Labels:    map[string]string{},
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{{
+				// TODO: Handle more than or less than 1 Container (#12)
+				Name:         referencePod.Spec.Containers[0].Name,
+				Image:        "k3d-rancher-logging-explorer-registry:5000/rancher-logging-explorer/pod-simulator:latest",
+				VolumeMounts: []v1.VolumeMount{{Name: "config-volume", MountPath: "/var/logs"}},
+			}},
+			Volumes: []v1.Volume{
+				{
+					Name: "config-volume",
+					VolumeSource: v1.VolumeSource{
+						ConfigMap: &v1.ConfigMapVolumeSource{
+							LocalObjectReference: v1.LocalObjectReference{Name: fmt.Sprintf("%s-configmap", flowTest.Spec.ReferencePod.Name)},
+						},
+					},
+				},
+			},
+			NodeSelector: referencePod.Spec.NodeSelector,
+		},
+	}
+
+	extraLabels := map[string]string{}
+
+	extraLabels["app.kubernetes.io/name"] = "pod-simulation"
+	extraLabels["app.kubernetes.io/managed-by"] = "rancher-logging-explorer"
+	extraLabels["app.kubernetes.io/created-by"] = "logging-plumber"
+	extraLabels["loggingplumber.isala.me/flowtest-uuid"] = string(flowTest.ObjectMeta.UID)
+	extraLabels["loggingplumber.isala.me/flowtest"] = flowTest.ObjectMeta.Name
+
+	// copy the reference pod's labels instead of aliasing its map, so the
+	// reference object is never mutated
+	for k, v := range referencePod.ObjectMeta.Labels {
+		simulationPod.ObjectMeta.Labels[k] = v
+	}
+
+	for k, v := range extraLabels {
+		simulationPod.ObjectMeta.Labels[k] = v
+	}
+
+	if err := r.Create(ctx, &simulationPod); err != nil {
+		logger.Error(err, "failed to create the simulation pod")
+		return err
+	}
+
+	logger.V(1).Info("deployed simulation pod", "pod-uuid", simulationPod.UID)
+
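+	// The aggregator pod is shared: it is only created when a pod named
+	// logging-plumber-log-aggregator is not already running in the
+	// FlowTest's namespace, so concurrent tests reuse a single collector.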
+	var outputPod v1.Pod
+	if err := r.Get(ctx, client.ObjectKey{Name: "logging-plumber-log-aggregator", Namespace: flowTest.ObjectMeta.Namespace}, &outputPod); err != nil {
+		if apierrors.IsNotFound(err) {
+			outputPod := v1.Pod{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "v1",
+					Kind:       "Pod",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "logging-plumber-log-aggregator",
+					Namespace: flowTest.ObjectMeta.Namespace,
+					Labels: map[string]string{
+						"app.kubernetes.io/name":            "logging-plumber-log-aggregator",
+						"app.kubernetes.io/managed-by":      "rancher-logging-explorer",
+						"app.kubernetes.io/created-by":      "logging-plumber",
+						"loggingplumber.isala.me/component": "log-aggregator",
+					},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{{
+						Name:  "log-output",
+						Image: "paynejacob/log-output:latest",
+						Ports: []v1.ContainerPort{{
+							Name:          "http",
+							ContainerPort: 80,
+							Protocol:      "TCP",
+						}},
+					}},
+				},
+			}
+			if err := r.Create(ctx, &outputPod); err != nil {
+				logger.Error(err, "failed to create the log output pod")
+				return err
+			}
+			logger.V(1).Info("deployed log output pod", "pod-uuid", outputPod.UID)
+		}
+	} else {
+		logger.V(1).Info("found an already deployed log output pod", "pod-uuid", outputPod.UID)
+	}
+
+	var referenceFlow flowv1beta1.Flow
+	if err := r.Get(ctx, types.NamespacedName{
+		Namespace: flowTest.Spec.ReferenceFlow.Namespace,
+		Name:      flowTest.Spec.ReferenceFlow.Name,
+	}, &referenceFlow); err != nil {
+		return err
+	}
+
+	if err := r.deploySlicedFlows(ctx, referenceFlow, extraLabels); err != nil {
+		return err
+	}
+
+	flowTest.Status.Status = loggingplumberv1alpha1.Running
+
+	// seed the failure sets with everything; passing slices subtract from them
+	flowTest.Status.FailedFilters = referenceFlow.Spec.Filters
+	flowTest.Status.FailedMatches = referenceFlow.Spec.Match
+
+	if err := r.Status().Update(ctx, &flowTest); err != nil {
+		logger.Error(err, "failed to update flowtest status")
+		return err
+	}
+
+	return nil
+}
+
+func (r *FlowTestReconciler) deploySlicedFlows(ctx context.Context, referenceFlow flowv1beta1.Flow, extraLabels map[string]string) error {
+	logger := log.FromContext(ctx)
+	flowTest := ctx.Value("flowTest").(loggingplumberv1alpha1.FlowTest)
+
+	i := 0
+
+	flowTemplate, outTemplate := banzaiTemplates(referenceFlow, flowTest, extraLabels)
+
+	for x := 1; x <= len(referenceFlow.Spec.Match); x++ {
+		targetFlow := *flowTemplate.DeepCopy()
+		targetOutput := *outTemplate.DeepCopy()
+
+		targetFlow.ObjectMeta.Name = fmt.Sprintf("%s-%d-match", flowTest.ObjectMeta.UID, i)
+		targetFlow.ObjectMeta.Labels["loggingplumber.isala.me/test-id"] = fmt.Sprintf("%d", i)
+		targetFlow.ObjectMeta.Labels["loggingplumber.isala.me/test-type"] = "match"
+
+		targetOutput.ObjectMeta.Name = fmt.Sprintf("%s-%d-match", flowTest.ObjectMeta.UID, i)
+		targetOutput.ObjectMeta.Labels["loggingplumber.isala.me/test-id"] = fmt.Sprintf("%d", i)
+		targetOutput.ObjectMeta.Labels["loggingplumber.isala.me/test-type"] = "match"
+		targetOutput.Spec.HTTPOutput.Endpoint = fmt.Sprintf("%s/%s/", targetOutput.Spec.HTTPOutput.Endpoint, targetFlow.ObjectMeta.Name)
+
+		targetFlow.Spec.LocalOutputRefs = []string{targetOutput.ObjectMeta.Name}
+
+		targetFlow.Spec.Match = append(targetFlow.Spec.Match, referenceFlow.Spec.Match[:x]...)
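+		// the template's first match entry selects the simulation pod's
+		// labels; the reference flow's match statements are layered on
+		// cumulatively (Match[:x]), one more statement per slice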
+
+		if err := r.Create(ctx, &targetOutput); err != nil {
+			logger.Error(err, fmt.Sprintf("failed to deploy Output #%d for %s", i, referenceFlow.ObjectMeta.Name))
+			return err
+		}
+
+		if err := r.Create(ctx, &targetFlow); err != nil {
+			logger.Error(err, fmt.Sprintf("failed to deploy Flow #%d for %s", i, referenceFlow.ObjectMeta.Name))
+			return err
+		}
+
+		logger.V(1).Info("deployed match slice", "test-id", i)
+		i++
+	}
+
+	for x := 1; x <= len(referenceFlow.Spec.Filters); x++ {
+		targetFlow := *flowTemplate.DeepCopy()
+		targetOutput := *outTemplate.DeepCopy()
+
+		targetFlow.ObjectMeta.Name = fmt.Sprintf("%s-%d-filter", flowTest.ObjectMeta.UID, i)
+		targetFlow.ObjectMeta.Labels["loggingplumber.isala.me/test-type"] = "filter"
+		targetFlow.ObjectMeta.Labels["loggingplumber.isala.me/test-id"] = fmt.Sprintf("%d", i)
+
+		targetOutput.ObjectMeta.Name = fmt.Sprintf("%s-%d-filter", flowTest.ObjectMeta.UID, i)
+		targetOutput.ObjectMeta.Labels["loggingplumber.isala.me/test-type"] = "filter"
+		targetOutput.ObjectMeta.Labels["loggingplumber.isala.me/test-id"] = fmt.Sprintf("%d", i)
+		targetOutput.Spec.HTTPOutput.Endpoint = fmt.Sprintf("%s/%s/", targetOutput.Spec.HTTPOutput.Endpoint, targetFlow.ObjectMeta.Name)
+
+		targetFlow.Spec.LocalOutputRefs = []string{targetOutput.ObjectMeta.Name}
+
+		targetFlow.Spec.Match = nil
+
+		targetFlow.Spec.Filters = referenceFlow.Spec.Filters[:x]
+
+		if err := r.Create(ctx, &targetOutput); err != nil {
+			logger.Error(err, fmt.Sprintf("failed to deploy Output #%d for %s", i, referenceFlow.ObjectMeta.Name))
+			return err
+		}
+
+		if err := r.Create(ctx, &targetFlow); err != nil {
+			logger.Error(err, fmt.Sprintf("failed to deploy Flow #%d for %s", i, referenceFlow.ObjectMeta.Name))
+			return err
+		}
+		logger.V(1).Info("deployed filter slice", "test-id", i)
+		i++
+	}
+
+	return nil
+}
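deploySlicedFlows turns one reference flow into a family of prefix slices: first Match[:1] through Match[:n], then Filters[:1] through Filters[:n], each wired to its own HTTP output so the aggregator indexes them separately. The first prefix whose flow stops receiving the test messages pinpoints the statement that drops them. A toy sketch of the enumeration; the filter names are invented for the example:

```go
package sketch

import "fmt"

func main() {
	// stand-ins for the reference flow's ordered filter statements
	filters := []string{"record_modifier", "grep", "parser"}

	// one flow per prefix: filters[:1], filters[:2], filters[:3]; if the
	// filters[:2] slice passes but filters[:3] does not, the third filter
	// is the one dropping the simulated log lines
	for x := 1; x <= len(filters); x++ {
		fmt.Printf("slice %d carries filters %v\n", x, filters[:x])
	}
}
```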
diff --git a/operator/controllers/utils.go b/operator/controllers/utils.go
new file mode 100644
index 0000000..0d51c14
--- /dev/null
+++ b/operator/controllers/utils.go
@@ -0,0 +1,127 @@
+package controllers
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	flowv1beta1 "github.com/banzaicloud/logging-operator/pkg/sdk/api/v1beta1"
+	"github.com/banzaicloud/logging-operator/pkg/sdk/model/output"
+	loggingplumberv1alpha1 "github.com/mrsupiri/rancher-logging-explorer/api/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"net/http"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"time"
+)
+
+func banzaiTemplates(flow flowv1beta1.Flow, flowTest loggingplumberv1alpha1.FlowTest, extraLabels map[string]string) (flowv1beta1.Flow, flowv1beta1.Output) {
+	flowTemplate := flowv1beta1.Flow{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "logging.banzaicloud.io/v1beta1",
+			Kind:       "Flow",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-test", flow.ObjectMeta.Name),
+			Namespace: flow.Namespace,
+			Labels: map[string]string{
+				"app.kubernetes.io/name":                flow.ObjectMeta.Name,
+				"app.kubernetes.io/managed-by":          "rancher-logging-explorer",
+				"app.kubernetes.io/created-by":          "logging-plumber",
+				"loggingplumber.isala.me/flowtest-uuid": string(flowTest.ObjectMeta.UID),
+				"loggingplumber.isala.me/flowtest":      flowTest.ObjectMeta.Name,
+			},
+		},
+		Spec: flowv1beta1.FlowSpec{
+			LocalOutputRefs: nil,
+			Match: []flowv1beta1.Match{{
+				Select: &flowv1beta1.Select{Labels: extraLabels},
+			}},
+		},
+	}
+
+	outTemplate := flowv1beta1.Output{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "logging.banzaicloud.io/v1beta1",
+			Kind:       "Output",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-test", flow.ObjectMeta.Name),
+			Namespace: flow.Namespace,
+			Labels: map[string]string{
+				"app.kubernetes.io/name":                flow.ObjectMeta.Name,
+				"app.kubernetes.io/managed-by":          "rancher-logging-explorer",
+				"app.kubernetes.io/created-by":          "logging-plumber",
+				"loggingplumber.isala.me/flowtest-uuid": string(flowTest.ObjectMeta.UID),
+				"loggingplumber.isala.me/flowtest":      flowTest.ObjectMeta.Name,
+			},
+		},
+		Spec: flowv1beta1.OutputSpec{
+			HTTPOutput: &output.HTTPOutputConfig{
+				Endpoint: "http://logging-plumber-log-aggregator.default.svc",
+				Buffer: &output.Buffer{
+					FlushMode:     "interval",
+					FlushInterval: "10s",
+				},
+			},
+		},
+	}
+
+	return flowTemplate, outTemplate
+}
+
+func (r *FlowTestReconciler) setErrorStatus(ctx context.Context, err error) error {
+	logger := log.FromContext(ctx)
+	flowTest := ctx.Value("flowTest").(loggingplumberv1alpha1.FlowTest)
+	if err != nil {
+		flowTest.Status.Status = loggingplumberv1alpha1.Error
+
+		if err := r.Status().Update(ctx, &flowTest); err != nil {
+			logger.Error(err, "failed to update flowtest status")
+			return err
+		}
+		return err
+	}
+	return nil
+}
+
+type Index struct {
+	Name     string    `json:"name"`
+	FirstLog time.Time `json:"first_log"`
+	LastLog  time.Time `json:"last_log"`
+	LogCount int       `json:"log_count"`
+}
+
+func CheckIndex(ctx context.Context, indexName string) (bool, error) {
+	logger := log.FromContext(ctx)
+
+	client := &http.Client{Timeout: 10 * time.Second}
+
+	// TODO: UPDATE THIS SVC NAME
+	// NOTE: When developing this requires port-forward because controller is running locally
+	req, err := http.NewRequest("GET", "http://localhost:8111/", nil)
+	if err != nil {
+		logger.Error(err, "failed to create request for checking indexes")
+		return false, err
+	}
+	req.Header.Add("Accept", "application/json")
+	resp, err := client.Do(req)
+	if err != nil {
+		logger.Error(err, "failed to fetch log indexes")
+		return false, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+	var indexes []Index
+	if err := json.NewDecoder(resp.Body).Decode(&indexes); err != nil {
+		logger.Error(err, "failed to decode log indexes")
+		return false, err
+	}
+	for _, index := range indexes {
+		if index.Name == indexName {
+			return index.LogCount > 0, nil
+		}
+	}
+
+	return false, nil
+}
diff --git a/operator/main.go b/operator/main.go
index 8f79a53..f9ea684 100644
--- a/operator/main.go
+++ b/operator/main.go
@@ -32,6 +32,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 
+	loggingoperatorv1beta1 "github.com/banzaicloud/logging-operator/pkg/sdk/api/v1beta1"
+
 	loggingplumberv1alpha1 "github.com/mrsupiri/rancher-logging-explorer/api/v1alpha1"
 	"github.com/mrsupiri/rancher-logging-explorer/controllers"
 	//+kubebuilder:scaffold:imports
@@ -44,6 +46,7 @@ var (
 func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+	utilruntime.Must(loggingoperatorv1beta1.AddToScheme(scheme))
 	utilruntime.Must(loggingplumberv1alpha1.AddToScheme(scheme))
 
 	//+kubebuilder:scaffold:scheme
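CheckIndex drives the pass/fail decision for each slice: the aggregator keeps one index per flow (each output's endpoint path ends in the flow's name), and a slice counts as passing once its index reports a non-zero log_count. A self-contained sketch of the decode step, using an assumed example payload rather than captured aggregator output:

```go
package sketch

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// Index mirrors the struct CheckIndex decodes in utils.go above.
type Index struct {
	Name     string    `json:"name"`
	FirstLog time.Time `json:"first_log"`
	LastLog  time.Time `json:"last_log"`
	LogCount int       `json:"log_count"`
}

func main() {
	// assumed example of the aggregator's response shape
	payload := `[{"name":"42-0-match","first_log":"2021-06-10T11:50:06Z","last_log":"2021-06-10T11:50:09Z","log_count":4}]`

	var indexes []Index
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&indexes); err != nil {
		panic(err)
	}
	for _, index := range indexes {
		fmt.Printf("%s passing=%v\n", index.Name, index.LogCount > 0)
	}
}
```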
diff --git a/pod-simulator/Dockerfile b/pod-simulator/Dockerfile
new file mode 100644
index 0000000..dde4ab8
--- /dev/null
+++ b/pod-simulator/Dockerfile
@@ -0,0 +1,8 @@
+FROM golang:1.16-alpine as build
+WORKDIR /build
+COPY . .
+RUN go build -o pod-simulator main.go
+
+FROM alpine
+COPY --from=build /build/pod-simulator /usr/local/bin/pod-simulator
+ENTRYPOINT ["pod-simulator"]
\ No newline at end of file
diff --git a/pod-simulator/go.mod b/pod-simulator/go.mod
index 1fb0b57..b204fff 100644
--- a/pod-simulator/go.mod
+++ b/pod-simulator/go.mod
@@ -1,5 +1,3 @@
-module github.com/mrsupiri/rancher-logging-explorer/pod-simulator
+module github.com/MrSupiri/rancher-logging-explorer/pod-simulator
 
 go 1.16
-
-require github.com/gorilla/mux v1.8.0
diff --git a/pod-simulator/go.sum b/pod-simulator/go.sum
index 5350288..e69de29 100644
--- a/pod-simulator/go.sum
+++ b/pod-simulator/go.sum
@@ -1,2 +0,0 @@
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
diff --git a/pod-simulator/main.go b/pod-simulator/main.go
index 3d22acb..82aa61b 100644
--- a/pod-simulator/main.go
+++ b/pod-simulator/main.go
@@ -19,7 +19,7 @@ func main() {
 	// os.Open() opens the specified file in
 	// read-only mode and returns
 	// a pointer of type *os.File.
-	file, err := os.Open("./simulation.log")
+	file, err := os.Open("/var/logs/simulation.log")
 	if err != nil {
 		panic("failed to open simulation.log")