diff --git a/CHANGELOG.md b/CHANGELOG.md
index a9f5d0cd399..afa548a2370 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,7 +53,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
### Improvements
- **General:**: Add ScaledObject/ScaledJob names to output of `kubectl get triggerauthentication/clustertriggerauthentication` ([#796](https://github.com/kedacore/keda/issues/796))
-
+- **General**: Add ComplexScalingLogic structure to SO for advanced scaling options ([#3567](https://github.com/kedacore/keda/issues/3567), [#2440](https://github.com/kedacore/keda/issues/2440))
### Fixes
- **General**: Metrics server exposes Prometheus metrics ([#4776](https://github.com/kedacore/keda/issues/4776))
diff --git a/Makefile b/Makefile
index 061320f541a..2035be451df 100644
--- a/Makefile
+++ b/Makefile
@@ -160,7 +160,7 @@ proto-gen: protoc-gen ## Generate Liiklus, ExternalScaler and MetricsService pro
PATH="$(LOCALBIN):$(PATH)" protoc -I vendor --proto_path=pkg/metricsservice/api metrics.proto --go_out=pkg/metricsservice/api --go-grpc_out=pkg/metricsservice/api
.PHONY: mockgen-gen
-mockgen-gen: mockgen pkg/mock/mock_scaling/mock_interface.go pkg/mock/mock_scaling/mock_executor/mock_interface.go pkg/mock/mock_scaler/mock_scaler.go pkg/mock/mock_scale/mock_interfaces.go pkg/mock/mock_client/mock_interfaces.go pkg/scalers/liiklus/mocks/mock_liiklus.go pkg/mock/mock_secretlister/mock_interfaces.go
+mockgen-gen: mockgen pkg/mock/mock_scaling/mock_interface.go pkg/mock/mock_scaling/mock_executor/mock_interface.go pkg/mock/mock_scaler/mock_scaler.go pkg/mock/mock_scale/mock_interfaces.go pkg/mock/mock_client/mock_interfaces.go pkg/scalers/liiklus/mocks/mock_liiklus.go pkg/mock/mock_secretlister/mock_interfaces.go pkg/mock/mock_externalscaling/mock_externalscaling.go
pkg/mock/mock_scaling/mock_interface.go: pkg/scaling/scale_handler.go
$(MOCKGEN) -destination=$@ -package=mock_scaling -source=$^
@@ -168,6 +168,8 @@ pkg/mock/mock_scaling/mock_executor/mock_interface.go: pkg/scaling/executor/scal
$(MOCKGEN) -destination=$@ -package=mock_executor -source=$^
pkg/mock/mock_scaler/mock_scaler.go: pkg/scalers/scaler.go
$(MOCKGEN) -destination=$@ -package=mock_scalers -source=$^
+pkg/mock/mock_externalscaling/mock_externalscaling.go: pkg/externalscaling/api/externalCalculation_grpc.pb.go
+ $(MOCKGEN) -destination=$@ -package=mock_externalscaling -source=$^
pkg/mock/mock_secretlister/mock_interfaces.go: vendor/k8s.io/client-go/listers/core/v1/secret.go
mkdir -p pkg/mock/mock_secretlister
$(MOCKGEN) k8s.io/client-go/listers/core/v1 SecretLister,SecretNamespaceLister > $@
diff --git a/apis/keda/v1alpha1/condition_types.go b/apis/keda/v1alpha1/condition_types.go
index 4f3a182ee77..6256f0cfd43 100644
--- a/apis/keda/v1alpha1/condition_types.go
+++ b/apis/keda/v1alpha1/condition_types.go
@@ -32,6 +32,8 @@ const (
ConditionActive ConditionType = "Active"
// ConditionFallback specifies that the resource has a fallback active.
ConditionFallback ConditionType = "Fallback"
+ // ConditionExternalFallback specifies that the resource has external fallback active.
+ ConditionExternalFallback ConditionType = "ExternalFallback"
// ConditionPaused specifies that the resource is paused.
ConditionPaused ConditionType = "Paused"
)
@@ -88,6 +90,8 @@ func (c *Conditions) AreInitialized() bool {
foundActive := false
foundFallback := false
foundPaused := false
+ foundExternalFallback := false
+
if *c != nil {
for _, condition := range *c {
if condition.Type == ConditionReady {
@@ -113,14 +117,19 @@ func (c *Conditions) AreInitialized() bool {
break
}
}
+ for _, condition := range *c {
+ if condition.Type == ConditionExternalFallback {
+ foundExternalFallback = true
+ }
+ }
}
- return foundReady && foundActive && foundFallback && foundPaused
+ return foundReady && foundActive && foundFallback && foundPaused && foundExternalFallback
}
// GetInitializedConditions returns Conditions initialized to the default -> Status: Unknown
func GetInitializedConditions() *Conditions {
- return &Conditions{{Type: ConditionReady, Status: metav1.ConditionUnknown}, {Type: ConditionActive, Status: metav1.ConditionUnknown}, {Type: ConditionFallback, Status: metav1.ConditionUnknown}, {Type: ConditionPaused, Status: metav1.ConditionUnknown}}
+ return &Conditions{{Type: ConditionReady, Status: metav1.ConditionUnknown}, {Type: ConditionActive, Status: metav1.ConditionUnknown}, {Type: ConditionFallback, Status: metav1.ConditionUnknown}, {Type: ConditionPaused, Status: metav1.ConditionUnknown}, {Type: ConditionExternalFallback, Status: metav1.ConditionUnknown}}
}
// IsTrue is true if the condition is True
@@ -171,6 +180,14 @@ func (c *Conditions) SetFallbackCondition(status metav1.ConditionStatus, reason
c.setCondition(ConditionFallback, status, reason, message)
}
+// SetExternalFallbackCondition modifies ExternalFallback Condition according to input parameters (for ExternalCalculators)
+func (c *Conditions) SetExternalFallbackCondition(status metav1.ConditionStatus, reason string, message string) {
+ if *c == nil {
+ c = GetInitializedConditions()
+ }
+ c.setCondition(ConditionExternalFallback, status, reason, message)
+}
+
// SetPausedCondition modifies Paused Condition according to input parameters
func (c *Conditions) SetPausedCondition(status metav1.ConditionStatus, reason string, message string) {
if *c == nil {
@@ -211,6 +228,13 @@ func (c *Conditions) GetPausedCondition() Condition {
return c.getCondition(ConditionPaused)
}
+func (c *Conditions) GetExternalFallbackCondition() Condition {
+ if *c == nil {
+ c = GetInitializedConditions()
+ }
+ return c.getCondition(ConditionExternalFallback)
+}
+
func (c Conditions) getCondition(conditionType ConditionType) Condition {
for i := range c {
if c[i].Type == conditionType {
diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go
index 24af5a2c0ee..8d275ce9ee8 100644
--- a/apis/keda/v1alpha1/scaledobject_types.go
+++ b/apis/keda/v1alpha1/scaledobject_types.go
@@ -102,6 +102,31 @@ type AdvancedConfig struct {
HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscalerConfig,omitempty"`
// +optional
RestoreToOriginalReplicaCount bool `json:"restoreToOriginalReplicaCount,omitempty"`
+ // +optional
+ ComplexScalingLogic ComplexScalingLogic `json:"complexScalingLogic,omitempty"`
+}
+
+// ComplexScalingLogic describes advanced scaling logic options like formula
+// and gRPC server for external calculations
+type ComplexScalingLogic struct {
+ // +optional
+ ExternalCalculations []ExternalCalculation `json:"externalCalculators,omitempty"`
+ // +optional
+ Formula string `json:"formula,omitempty"`
+ // +optional
+ Target string `json:"target,omitempty"`
+}
+
+// ExternalCalculation structure describes name and URL of a gRPC server
+// that KEDA can connect to with collected metrics and modify them. Each server
+// has a timeout and tls certification. If certDir is left empty, it will
+// connect with insecure.NewCredentials()
+type ExternalCalculation struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+ Timeout string `json:"timeout"`
+ // +optional
+ CertificateDirectory string `json:"certDir"`
}
// HorizontalPodAutoscalerConfig specifies horizontal scale config
@@ -141,6 +166,10 @@ type ScaledObjectStatus struct {
// +optional
ResourceMetricNames []string `json:"resourceMetricNames,omitempty"`
// +optional
+ CompositeScalerName string `json:"compositeScalerName,omitempty"`
+ // +optional
+ ExternalCalculationHealth map[string]HealthStatus `json:"externalCalculationHealth,omitempty"`
+ // +optional
Conditions Conditions `json:"conditions,omitempty"`
// +optional
Health map[string]HealthStatus `json:"health,omitempty"`
diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go
index 6daa3aa62d5..bb2958b7a93 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook.go
@@ -19,7 +19,11 @@ package v1alpha1
import (
"context"
"encoding/json"
+ "errors"
"fmt"
+ "reflect"
+ "strconv"
+ "time"
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
@@ -213,6 +217,16 @@ func verifyScaledObjects(incomingSo *ScaledObject, action string) error {
}
}
+ // verify ComplexScalingLogic structure if defined in ScaledObject
+ if incomingSo.Spec.Advanced != nil && !reflect.DeepEqual(incomingSo.Spec.Advanced.ComplexScalingLogic, ComplexScalingLogic{}) {
+ _, _, err = ValidateComplexScalingLogic(incomingSo, []autoscalingv2.MetricSpec{})
+ if err != nil {
+ scaledobjectlog.Error(err, "error validating ComplexScalingLogic")
+ prommetrics.RecordScaledObjectValidatingErrors(incomingSo.Namespace, action, "complex-scaling-logic")
+
+ return err
+ }
+ }
return nil
}
@@ -297,3 +311,109 @@ func verifyCPUMemoryScalers(incomingSo *ScaledObject, action string) error {
}
return nil
}
+
+// ValidateComplexScalingLogic validates all combinations of given arguments
+// and their values
+func ValidateComplexScalingLogic(so *ScaledObject, specs []autoscalingv2.MetricSpec) (float64, autoscalingv2.MetricTargetType, error) {
+ csl := so.Spec.Advanced.ComplexScalingLogic
+
+ // if Formula AND ExternalCalculations is empty, return an error
+ if csl.Formula == "" && len(csl.ExternalCalculations) < 1 {
+ return -1, autoscalingv2.MetricTargetType(""), fmt.Errorf("error at least one ComplexScalingLogic function needs to be specified (formula or externalCalculation)")
+ }
+
+ var num float64
+ var metricType autoscalingv2.MetricTargetType
+
+ // validate formula if not empty
+ if err := validateCSLformula(so); err != nil {
+ err := errors.Join(fmt.Errorf("error validating formula in ComplexScalingLogic"), err)
+ return -1, autoscalingv2.MetricTargetType(""), err
+ }
+ // validate externalCalculators if not empty
+ if err := validateCSLexternalCalculations(csl); err != nil {
+ err := errors.Join(fmt.Errorf("error validating externalCalculator in ComplexScalingLogic"), err)
+ return -1, autoscalingv2.MetricTargetType(""), err
+ }
+ // validate target if not empty
+ num, metricType, err := validateCSLtarget(csl, specs)
+ if err != nil {
+ err := errors.Join(fmt.Errorf("error validating target in ComplexScalingLogic"), err)
+ return -1, autoscalingv2.MetricTargetType(""), err
+ }
+ return num, metricType, nil
+}
+
+func validateCSLformula(so *ScaledObject) error {
+ csl := so.Spec.Advanced.ComplexScalingLogic
+
+ // if formula is empty, nothing to validate
+ if csl.Formula == "" {
+ return nil
+ }
+ // formula needs target because it's always transformed to Composite scaler
+ if csl.Target == "" {
+ return fmt.Errorf("formula is given but target is empty")
+ }
+
+ // possible TODO: this could be more sophisticated - only check for names that
+ // are used in the formula itself. This would require parsing the formula.
+ for _, trig := range so.Spec.Triggers {
+ if trig.Name == "" {
+ return fmt.Errorf("trigger of type '%s' has empty name but csl.Formula is defined", trig.Type)
+ }
+ }
+ if len(csl.ExternalCalculations) > 0 {
+ if csl.ExternalCalculations[len(csl.ExternalCalculations)-1].Name == "" {
+ return fmt.Errorf("last externalCalculator has empty name but csl.Formula is defined")
+ }
+ }
+ return nil
+}
+
+func validateCSLexternalCalculations(cls ComplexScalingLogic) error {
+ // timeout check
+ for _, ec := range cls.ExternalCalculations {
+ _, err := strconv.ParseInt(ec.Timeout, 10, 64)
+ if err != nil {
+ // expect timeout in time format like 1m10s
+ _, err = time.ParseDuration(ec.Timeout)
+ if err != nil {
+ return fmt.Errorf("%s: error while converting type of timeout for external calculator", err)
+ }
+ }
+ if ec.URL == "" {
+ return fmt.Errorf("URL is empty for externalCalculator '%s'", ec.Name)
+ }
+ }
+
+ return nil
+}
+
+func validateCSLtarget(csl ComplexScalingLogic, specs []autoscalingv2.MetricSpec) (float64, autoscalingv2.MetricTargetType, error) {
+ if csl.Target == "" {
+ return -1, "", nil
+ }
+ // convert string to float
+ num, err := strconv.ParseFloat(csl.Target, 64)
+ if err != nil || num <= 0.0 {
+ return -1, "", fmt.Errorf("error converting target for complex logic (string->float) to valid target: %w", err)
+ }
+
+ var metricType autoscalingv2.MetricTargetType
+ // if target is given, composite scaler for metric collection will be
+ // passed to HPA config -> all types need to be the same
+ // make sure all scalers have the same metricTargetType
+ for _, metric := range specs {
+ if metric.External == nil {
+ continue
+ }
+ if metricType == "" {
+ metricType = metric.External.Target.Type
+ } else if metric.External.Target.Type != metricType {
+ err := fmt.Errorf("error metric target type not the same for composite scaler: %s & %s", metricType, metric.External.Target.Type)
+ return -1, "", err
+ }
+ }
+ return num, metricType, nil
+}
diff --git a/apis/keda/v1alpha1/scaledobject_webhook_test.go b/apis/keda/v1alpha1/scaledobject_webhook_test.go
index fbde5787df1..c91f99abe56 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook_test.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook_test.go
@@ -514,6 +514,324 @@ var _ = It("should validate the so update if it's removing the finalizer even if
}).ShouldNot(HaveOccurred())
})
+var _ = It("should validate the so creation with ComplexScalingLogic.Formula", func() {
+ namespaceName := "complex-scaling-logic-formula-good"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, false, false)
+
+ csl := ComplexScalingLogic{Target: "2", Formula: "workload_trig + cron_trig"}
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Name: "cron_trig",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Name: "workload_trig",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("should validate the so creation with ComplexScalingLogic.ExternalCalculations", func() {
+ namespaceName := "complex-scaling-logic-ec-good"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, false, false)
+
+ csl := ComplexScalingLogic{Target: "2", ExternalCalculations: []ExternalCalculation{
+ {
+ Name: "calc1",
+ URL: "http://test.com",
+ Timeout: "10s",
+ },
+ {
+ Name: "calc2",
+ URL: "http://test2.com",
+ Timeout: "20",
+ },
+ }}
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Name: "cron_trig",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Name: "workload_trig",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("should validate the so creation with ComplexScalingLogic Formula & ExternalCalculations", func() {
+ namespaceName := "complex-scaling-logic-both-good"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, false, false)
+
+ csl := ComplexScalingLogic{Target: "2", Formula: "3 + calc_last", ExternalCalculations: []ExternalCalculation{
+ {
+ Name: "calc1",
+ URL: "http://test.com",
+ Timeout: "10s",
+ },
+ {
+ Name: "calc_last",
+ URL: "http://test2.com",
+ Timeout: "20",
+ },
+ },
+ }
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Name: "cron_trig",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Name: "workload_trig",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("should validate the so creation with ComplexScalingLogic.ExternalCalc without Target", func() {
+ namespaceName := "complex-scaling-logic-ec-no-target-good"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, false, false)
+
+ csl := ComplexScalingLogic{ExternalCalculations: []ExternalCalculation{
+ {
+ Name: "calc1",
+ URL: "http://test.com",
+ Timeout: "10s",
+ }, {
+ Name: "calc2",
+ URL: "http://test2.com",
+ Timeout: "1m2s",
+ },
+ }}
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Name: "cron_trig",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Name: "workload_trig",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("shouldn't validate the so creation with csl.Formula but no target", func() {
+ namespaceName := "complex-scaling-logic-formula-no-target-bad"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, false, false)
+
+ csl := ComplexScalingLogic{Formula: "workload_trig + cron_trig"}
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Name: "cron_trig",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Name: "workload_trig",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).Should(HaveOccurred())
+})
+
+var _ = It("shouldn't validate the so creation with ComplexScalingLogic url empty", func() {
+ namespaceName := "complex-scaling-logic-empty-url-bad"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, true, true)
+
+ csl := ComplexScalingLogic{ExternalCalculations: []ExternalCalculation{
+ {
+ Name: "calc1",
+ URL: "",
+ Timeout: "10s",
+ },
+ },
+ }
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Name: "cron_trig",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Name: "workload_trig",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).Should(HaveOccurred())
+})
+
+var _ = It("shouldn't validate the so creation with ComplexScalingLogic when triggers dont have names", func() {
+ namespaceName := "complex-scaling-logic-triggers-no-names-bad"
+ namespace := createNamespace(namespaceName)
+ workload := createDeployment(namespaceName, true, true)
+
+ csl := ComplexScalingLogic{ExternalCalculations: []ExternalCalculation{
+ {
+ Name: "calc1",
+ URL: "",
+ Timeout: "10s",
+ },
+ },
+ }
+
+ triggers := []ScaleTriggers{
+ {
+ Type: "cron",
+ Metadata: map[string]string{
+ "timezone": "UTC",
+ "start": "0 * * * *",
+ "end": "1 * * * *",
+ "desiredReplicas": "1",
+ },
+ },
+ {
+ Type: "kubernetes-workload",
+ Metadata: map[string]string{
+ "podSelector": "pod=workload-test",
+ "value": "1",
+ },
+ },
+ }
+
+ so := createScaledObjectCSL(namespaceName, csl, triggers)
+
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = k8sClient.Create(context.Background(), workload)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), so)
+ }).Should(HaveOccurred())
+})
+
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
@@ -761,3 +1079,31 @@ func createScaledObjectSTZ(name string, namespace string, targetName string, min
},
}
}
+
+func createScaledObjectCSL(namespace string, csl ComplexScalingLogic, triggers []ScaleTriggers) *ScaledObject {
+ name := soName
+ targetName := workloadName
+ return &ScaledObject{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ UID: types.UID(name),
+ },
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ScaledObject",
+ APIVersion: "keda.sh",
+ },
+ Spec: ScaledObjectSpec{
+ ScaleTargetRef: &ScaleTarget{
+ Name: targetName,
+ },
+ MinReplicaCount: pointer.Int32(0),
+ MaxReplicaCount: pointer.Int32(10),
+ CooldownPeriod: pointer.Int32(1),
+ Triggers: triggers,
+ Advanced: &AdvancedConfig{
+ ComplexScalingLogic: csl,
+ },
+ },
+ }
+}
diff --git a/apis/keda/v1alpha1/zz_generated.deepcopy.go b/apis/keda/v1alpha1/zz_generated.deepcopy.go
index e10bb0fb81b..167e0be4323 100755
--- a/apis/keda/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/keda/v1alpha1/zz_generated.deepcopy.go
@@ -35,6 +35,7 @@ func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) {
*out = new(HorizontalPodAutoscalerConfig)
(*in).DeepCopyInto(*out)
}
+ in.ComplexScalingLogic.DeepCopyInto(&out.ComplexScalingLogic)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedConfig.
@@ -267,6 +268,26 @@ func (in *ClusterTriggerAuthenticationList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComplexScalingLogic) DeepCopyInto(out *ComplexScalingLogic) {
+ *out = *in
+ if in.ExternalCalculations != nil {
+ in, out := &in.ExternalCalculations, &out.ExternalCalculations
+ *out = make([]ExternalCalculation, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComplexScalingLogic.
+func (in *ComplexScalingLogic) DeepCopy() *ComplexScalingLogic {
+ if in == nil {
+ return nil
+ }
+ out := new(ComplexScalingLogic)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
*out = *in
@@ -316,6 +337,21 @@ func (in *Credential) DeepCopy() *Credential {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalCalculation) DeepCopyInto(out *ExternalCalculation) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalCalculation.
+func (in *ExternalCalculation) DeepCopy() *ExternalCalculation {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalCalculation)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Fallback) DeepCopyInto(out *Fallback) {
*out = *in
@@ -753,6 +789,13 @@ func (in *ScaledObjectStatus) DeepCopyInto(out *ScaledObjectStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.ExternalCalculationHealth != nil {
+ in, out := &in.ExternalCalculationHealth, &out.ExternalCalculationHealth
+ *out = make(map[string]HealthStatus, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make(Conditions, len(*in))
diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml
index c83b988b301..3654deb74af 100644
--- a/config/crd/bases/keda.sh_scaledobjects.yaml
+++ b/config/crd/bases/keda.sh_scaledobjects.yaml
@@ -73,6 +73,37 @@ spec:
advanced:
description: AdvancedConfig specifies advance scaling options
properties:
+ complexScalingLogic:
+ description: ComplexScalingLogic describes advanced scaling logic
+ options like formula and gRPC server for external calculations
+ properties:
+ externalCalculators:
+ items:
+ description: ExternalCalculation structure describes name
+ and URL of a gRPC server that KEDA can connect to with
+ collected metrics and modify them. Each server has a timeout
+ and tls certification. If certDir is left empty, it will
+ connect with insecure.NewCredentials()
+ properties:
+ certDir:
+ type: string
+ name:
+ type: string
+ timeout:
+ type: string
+ url:
+ type: string
+ required:
+ - name
+ - timeout
+ - url
+ type: object
+ type: array
+ formula:
+ type: string
+ target:
+ type: string
+ type: object
horizontalPodAutoscalerConfig:
description: HorizontalPodAutoscalerConfig specifies horizontal
scale config
@@ -291,6 +322,8 @@ spec:
status:
description: ScaledObjectStatus is the status for a ScaledObject resource
properties:
+ compositeScalerName:
+ type: string
conditions:
description: Conditions an array representation to store multiple
Conditions
@@ -315,6 +348,19 @@ spec:
- type
type: object
type: array
+ externalCalculationHealth:
+ additionalProperties:
+ description: HealthStatus is the status for a ScaledObject's health
+ properties:
+ numberOfFailures:
+ format: int32
+ type: integer
+ status:
+ description: HealthStatusType is an indication of whether the
+ health status is happy or failing
+ type: string
+ type: object
+ type: object
externalMetricNames:
items:
type: string
diff --git a/controllers/keda/hpa.go b/controllers/keda/hpa.go
index be2edeefed3..00fd8285464 100644
--- a/controllers/keda/hpa.go
+++ b/controllers/keda/hpa.go
@@ -19,6 +19,7 @@ package keda
import (
"context"
"fmt"
+ "reflect"
"sort"
"strings"
"unicode"
@@ -26,6 +27,7 @@ import (
"github.com/go-logr/logr"
autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -212,6 +214,7 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context,
return nil, err
}
+ // TODO: GetMetricSpecForScaling returns metric specs with indexed trigger names here
metricSpecs := cache.GetMetricSpecForScaling(ctx)
for _, metricSpec := range metricSpecs {
@@ -246,7 +249,55 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context,
updateHealthStatus(scaledObject, externalMetricNames, status)
+ // if ComplexScalingLogic struct is not empty, expect Formula or ExternalCalculation
+ // to be non-empty. If target is > 0.0 create a compositeScaler structure
+ if scaledObject.Spec.Advanced != nil && !reflect.DeepEqual(scaledObject.Spec.Advanced.ComplexScalingLogic, kedav1alpha1.ComplexScalingLogic{}) {
+ validNumTarget, validMetricType, err := kedav1alpha1.ValidateComplexScalingLogic(scaledObject, scaledObjectMetricSpecs)
+ if err != nil {
+ logger.Error(err, "error validating ComplexScalingLogic")
+ return nil, err
+ }
+
+ // if target is valid, use composite scaler.
+ // Expect Formula or ExternalCalculation that returns one metric
+ if validNumTarget > 0.0 {
+ qual := resource.NewMilliQuantity(int64(validNumTarget*1000), resource.DecimalSI)
+
+ if err != nil {
+ logger.Error(err, "Error parsing Quantity elements for composite scaler")
+ return nil, err
+ }
+ compositeSpec := autoscalingv2.MetricSpec{
+ Type: autoscalingv2.MetricSourceType("External"),
+ External: &autoscalingv2.ExternalMetricSource{
+ Metric: autoscalingv2.MetricIdentifier{
+ Name: "composite-metric-name",
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"scaledobject.keda.sh/name": scaledObject.Name},
+ },
+ },
+ Target: autoscalingv2.MetricTarget{
+ Type: validMetricType,
+ AverageValue: qual,
+ },
+ },
+ }
+ status.CompositeScalerName = "composite-metric-name"
+
+ // overwrite external metrics in returned array with composite metric ONLY (keep resource metrics)
+ finalHpaSpecs := []autoscalingv2.MetricSpec{}
+ // keep resource specs
+ for _, rm := range scaledObjectMetricSpecs {
+ if rm.Resource != nil {
+ finalHpaSpecs = append(finalHpaSpecs, rm)
+ }
+ }
+ finalHpaSpecs = append(finalHpaSpecs, compositeSpec)
+ scaledObjectMetricSpecs = finalHpaSpecs
+ }
+ }
err = kedastatus.UpdateScaledObjectStatus(ctx, r.Client, logger, scaledObject, status)
+
if err != nil {
logger.Error(err, "Error updating scaledObject status with used externalMetricNames")
return nil, err
diff --git a/go.mod b/go.mod
index 03061962127..bfefca752c9 100644
--- a/go.mod
+++ b/go.mod
@@ -22,6 +22,7 @@ require (
github.com/DataDog/datadog-api-client-go v1.16.0
github.com/Huawei/gophercloud v1.0.21
github.com/Shopify/sarama v1.38.1
+ github.com/antonmedv/expr v1.12.5
github.com/arangodb/go-driver v1.6.0
github.com/aws/aws-sdk-go v1.44.287
github.com/bradleyfalzon/ghinstallation/v2 v2.5.0
diff --git a/go.sum b/go.sum
index ed248ca96f4..21d5cefde2e 100644
--- a/go.sum
+++ b/go.sum
@@ -131,6 +131,8 @@ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E=
+github.com/antonmedv/expr v1.12.5/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/arangodb/go-driver v1.6.0 h1:NFWj/idqXZxhFVueihMSI2R9NotNIsgvNfM/xmpekb4=
diff --git a/pkg/externalscaling/api/externalCalculation.pb.go b/pkg/externalscaling/api/externalCalculation.pb.go
new file mode 100644
index 00000000000..c8d1d75e84d
--- /dev/null
+++ b/pkg/externalscaling/api/externalCalculation.pb.go
@@ -0,0 +1,302 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.19.6
+// source: externalCalculation.proto
+
+package externalCalculation
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MetricsList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MetricValues []*Metric `protobuf:"bytes,1,rep,name=metricValues,proto3" json:"metricValues,omitempty"`
+}
+
+func (x *MetricsList) Reset() {
+ *x = MetricsList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_externalCalculation_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricsList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricsList) ProtoMessage() {}
+
+func (x *MetricsList) ProtoReflect() protoreflect.Message {
+ mi := &file_externalCalculation_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricsList.ProtoReflect.Descriptor instead.
+func (*MetricsList) Descriptor() ([]byte, []int) {
+ return file_externalCalculation_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MetricsList) GetMetricValues() []*Metric {
+ if x != nil {
+ return x.MetricValues
+ }
+ return nil
+}
+
+type Metric struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value float32 `protobuf:"fixed32,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_externalCalculation_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_externalCalculation_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_externalCalculation_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Metric) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Metric) GetValue() float32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type Response struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ List *MetricsList `protobuf:"bytes,1,opt,name=list,proto3" json:"list,omitempty"`
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *Response) Reset() {
+ *x = Response{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_externalCalculation_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_externalCalculation_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Response.ProtoReflect.Descriptor instead.
+func (*Response) Descriptor() ([]byte, []int) {
+ return file_externalCalculation_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Response) GetList() *MetricsList {
+ if x != nil {
+ return x.List
+ }
+ return nil
+}
+
+func (x *Response) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+var File_externalCalculation_proto protoreflect.FileDescriptor
+
+var file_externalCalculation_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x65, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x22, 0x4e, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12,
+ 0x3f, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73,
+ 0x22, 0x32, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x22, 0x56, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x34, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20,
+ 0x2e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x4c, 0x69, 0x73, 0x74,
+ 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0x65, 0x0a, 0x13,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x65,
+ 0x12, 0x20, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c, 0x63, 0x75,
+ 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x4c, 0x69,
+ 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x61, 0x6c,
+ 0x63, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x42, 0x17, 0x5a, 0x15, 0x2e, 0x3b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_externalCalculation_proto_rawDescOnce sync.Once
+ file_externalCalculation_proto_rawDescData = file_externalCalculation_proto_rawDesc
+)
+
+func file_externalCalculation_proto_rawDescGZIP() []byte {
+ file_externalCalculation_proto_rawDescOnce.Do(func() {
+ file_externalCalculation_proto_rawDescData = protoimpl.X.CompressGZIP(file_externalCalculation_proto_rawDescData)
+ })
+ return file_externalCalculation_proto_rawDescData
+}
+
+var file_externalCalculation_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_externalCalculation_proto_goTypes = []interface{}{
+ (*MetricsList)(nil), // 0: externalCalculation.MetricsList
+ (*Metric)(nil), // 1: externalCalculation.Metric
+ (*Response)(nil), // 2: externalCalculation.Response
+}
+var file_externalCalculation_proto_depIdxs = []int32{
+ 1, // 0: externalCalculation.MetricsList.metricValues:type_name -> externalCalculation.Metric
+ 0, // 1: externalCalculation.Response.list:type_name -> externalCalculation.MetricsList
+ 0, // 2: externalCalculation.ExternalCalculation.Calculate:input_type -> externalCalculation.MetricsList
+ 2, // 3: externalCalculation.ExternalCalculation.Calculate:output_type -> externalCalculation.Response
+ 3, // [3:4] is the sub-list for method output_type
+ 2, // [2:3] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_externalCalculation_proto_init() }
+func file_externalCalculation_proto_init() {
+ if File_externalCalculation_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_externalCalculation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricsList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_externalCalculation_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_externalCalculation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Response); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_externalCalculation_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_externalCalculation_proto_goTypes,
+ DependencyIndexes: file_externalCalculation_proto_depIdxs,
+ MessageInfos: file_externalCalculation_proto_msgTypes,
+ }.Build()
+ File_externalCalculation_proto = out.File
+ file_externalCalculation_proto_rawDesc = nil
+ file_externalCalculation_proto_goTypes = nil
+ file_externalCalculation_proto_depIdxs = nil
+}
diff --git a/pkg/externalscaling/api/externalCalculation.proto b/pkg/externalscaling/api/externalCalculation.proto
new file mode 100644
index 00000000000..c3269163a1e
--- /dev/null
+++ b/pkg/externalscaling/api/externalCalculation.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package externalCalculation;
+option go_package = ".;externalCalculation";
+
+service ExternalCalculation {
+ rpc Calculate (MetricsList) returns (Response) {};
+}
+
+message MetricsList {
+ repeated Metric metricValues = 1;
+}
+
+message Metric {
+ string name = 1;
+ float value = 2;
+}
+
+message Response {
+ MetricsList list = 1;
+ string error = 2;
+}
diff --git a/pkg/externalscaling/api/externalCalculation_grpc.pb.go b/pkg/externalscaling/api/externalCalculation_grpc.pb.go
new file mode 100644
index 00000000000..73368cf5de9
--- /dev/null
+++ b/pkg/externalscaling/api/externalCalculation_grpc.pb.go
@@ -0,0 +1,105 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.19.6
+// source: externalCalculation.proto
+
+package externalCalculation
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// ExternalCalculationClient is the client API for ExternalCalculation service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ExternalCalculationClient interface {
+ Calculate(ctx context.Context, in *MetricsList, opts ...grpc.CallOption) (*Response, error)
+}
+
+type externalCalculationClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewExternalCalculationClient(cc grpc.ClientConnInterface) ExternalCalculationClient {
+ return &externalCalculationClient{cc}
+}
+
+func (c *externalCalculationClient) Calculate(ctx context.Context, in *MetricsList, opts ...grpc.CallOption) (*Response, error) {
+ out := new(Response)
+ err := c.cc.Invoke(ctx, "/externalCalculation.ExternalCalculation/Calculate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ExternalCalculationServer is the server API for ExternalCalculation service.
+// All implementations must embed UnimplementedExternalCalculationServer
+// for forward compatibility
+type ExternalCalculationServer interface {
+ Calculate(context.Context, *MetricsList) (*Response, error)
+ mustEmbedUnimplementedExternalCalculationServer()
+}
+
+// UnimplementedExternalCalculationServer must be embedded to have forward compatible implementations.
+type UnimplementedExternalCalculationServer struct {
+}
+
+func (UnimplementedExternalCalculationServer) Calculate(context.Context, *MetricsList) (*Response, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Calculate not implemented")
+}
+func (UnimplementedExternalCalculationServer) mustEmbedUnimplementedExternalCalculationServer() {}
+
+// UnsafeExternalCalculationServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ExternalCalculationServer will
+// result in compilation errors.
+type UnsafeExternalCalculationServer interface {
+ mustEmbedUnimplementedExternalCalculationServer()
+}
+
+func RegisterExternalCalculationServer(s grpc.ServiceRegistrar, srv ExternalCalculationServer) {
+ s.RegisterService(&ExternalCalculation_ServiceDesc, srv)
+}
+
+func _ExternalCalculation_Calculate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MetricsList)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ExternalCalculationServer).Calculate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/externalCalculation.ExternalCalculation/Calculate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ExternalCalculationServer).Calculate(ctx, req.(*MetricsList))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// ExternalCalculation_ServiceDesc is the grpc.ServiceDesc for ExternalCalculation service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ExternalCalculation_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "externalCalculation.ExternalCalculation",
+ HandlerType: (*ExternalCalculationServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Calculate",
+ Handler: _ExternalCalculation_Calculate_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "externalCalculation.proto",
+}
diff --git a/pkg/externalscaling/api/run b/pkg/externalscaling/api/run
new file mode 100755
index 00000000000..e56c62bde2a
--- /dev/null
+++ b/pkg/externalscaling/api/run
@@ -0,0 +1,3 @@
+#!/usr/bin/bash
+
+protoc --go_out=. --go-grpc_out=. externalCalculation.proto
diff --git a/pkg/externalscaling/client.go b/pkg/externalscaling/client.go
new file mode 100644
index 00000000000..a827f48c7ee
--- /dev/null
+++ b/pkg/externalscaling/client.go
@@ -0,0 +1,170 @@
+package externalscaling
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "os"
+ "path"
+ "time"
+
+ "github.com/go-logr/logr"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/metrics/pkg/apis/external_metrics"
+
+ cl "github.com/kedacore/keda/v2/pkg/externalscaling/api"
+)
+
+type GrpcClient struct {
+ Client cl.ExternalCalculationClient
+ Connection *grpc.ClientConn
+}
+
+func NewGrpcClient(url string, certDir string) (*GrpcClient, error) {
+ retryPolicy := `{
+ "methodConfig": [{
+ "timeout": "3s",
+ "waitForReady": true,
+ "retryPolicy": {
+ "InitialBackoff": ".25s",
+ "MaxBackoff": "2.0s",
+ "BackoffMultiplier": 2,
+ "RetryableStatusCodes": [ "UNAVAILABLE" ]
+ }
+ }]}`
+
+ opts := []grpc.DialOption{
+ grpc.WithDefaultServiceConfig(retryPolicy),
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ }
+
+ // if certDir is not empty, load certificates
+ if certDir != "" {
+ creds, err := loadCertificates(certDir)
+ if err != nil {
+ return nil, fmt.Errorf("externalCalculator error while creating new client: %w", err)
+ }
+ opts = []grpc.DialOption{
+ grpc.WithDefaultServiceConfig(retryPolicy),
+ grpc.WithTransportCredentials(creds),
+ }
+ }
+
+ conn, err := grpc.Dial(url, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("externalCalculator error while creating new client: %w", err)
+ }
+
+ return &GrpcClient{Client: cl.NewExternalCalculationClient(conn), Connection: conn}, nil
+}
+
+func (c *GrpcClient) Calculate(ctx context.Context, list *cl.MetricsList) (*cl.MetricsList, error) {
+ response, err := c.Client.Calculate(ctx, list)
+ if err != nil {
+ return nil, fmt.Errorf("error in externalscaling.Calculate %w", err)
+ }
+ return response.List, nil
+}
+
+// WaitForConnectionReady waits for gRPC connection to be ready
+// returns true if the connection was successful, false if we hit a timeout or context canceled
+func (c *GrpcClient) WaitForConnectionReady(ctx context.Context, url string, timeout time.Duration, logger logr.Logger) bool {
+ currentState := c.Connection.GetState()
+ if currentState != connectivity.Ready {
+ logger.Info(fmt.Sprintf("Waiting for %v to establish a gRPC connection to server for external calculator at %s", timeout, url))
+ timeoutTimer := time.After(timeout)
+ for {
+ select {
+ case <-ctx.Done():
+ return false
+ case <-timeoutTimer:
+ err := fmt.Errorf("hit '%v' timeout trying to connect externalCalculator at '%s'", timeout, url)
+ logger.Error(err, "error while waiting for connection for externalCalculator")
+ return false
+ default:
+ c.Connection.Connect()
+ time.Sleep(500 * time.Millisecond)
+ currentState := c.Connection.GetState()
+ if currentState == connectivity.Ready {
+ return true
+ }
+ }
+ }
+ }
+ return true
+}
+
+// ConvertToGeneratedStruct converts K8s external metrics list to gRPC generated
+// external metrics list
+func ConvertToGeneratedStruct(inK8sList []external_metrics.ExternalMetricValue) *cl.MetricsList {
+ outExternal := cl.MetricsList{}
+ for _, val := range inK8sList {
+ metric := cl.Metric{Name: val.MetricName, Value: float32(val.Value.Value())}
+ outExternal.MetricValues = append(outExternal.MetricValues, &metric)
+ }
+ return &outExternal
+}
+
+// ConvertFromGeneratedStruct converts gRPC generated external metrics list to
+// K8s external_metrics list
+func ConvertFromGeneratedStruct(inExternal *cl.MetricsList) []external_metrics.ExternalMetricValue {
+ outK8sList := []external_metrics.ExternalMetricValue{}
+ for _, inValue := range inExternal.MetricValues {
+ outValue := external_metrics.ExternalMetricValue{}
+ outValue.MetricName = inValue.Name
+ outValue.Timestamp = v1.Now()
+ outValue.Value.SetMilli(int64(inValue.Value * 1000))
+ outK8sList = append(outK8sList, outValue)
+ }
+ return outK8sList
+}
+
+// close connection
+func (c *GrpcClient) CloseConnection() error {
+ if c.Connection != nil {
+ err := c.Connection.Close()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// load certificates taken from a directory given as an argument
+// expects ca.crt, tls.crt and tls.key to be present in the directory
+func loadCertificates(certDir string) (credentials.TransportCredentials, error) {
+ // Load the certificate of the CA that signed the client's certificate
+ pemClientCA, err := os.ReadFile(path.Join(certDir, "ca.crt"))
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the SystemCertPool, continue with an empty pool on error
+ certPool, _ := x509.SystemCertPool()
+ if certPool == nil {
+ certPool = x509.NewCertPool()
+ }
+ if !certPool.AppendCertsFromPEM(pemClientCA) {
+ return nil, fmt.Errorf("failed to add client CA's certificate")
+ }
+
+ // Load certificate and private key
+ cert, err := tls.LoadX509KeyPair(path.Join(certDir, "tls.crt"), path.Join(certDir, "tls.key"))
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the credentials and return it
+ config := &tls.Config{
+ MinVersion: tls.VersionTLS13,
+ Certificates: []tls.Certificate{cert},
+ }
+ config.RootCAs = certPool
+
+ return credentials.NewTLS(config), nil
+}
diff --git a/pkg/fallback/fallback.go b/pkg/fallback/fallback.go
index 286144692f0..ff773bd4dd1 100644
--- a/pkg/fallback/fallback.go
+++ b/pkg/fallback/fallback.go
@@ -18,6 +18,9 @@ package fallback
import (
"context"
+ "fmt"
+ "reflect"
+ "strconv"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/apimachinery/pkg/api/resource"
@@ -27,28 +30,96 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ cl "github.com/kedacore/keda/v2/pkg/externalscaling/api"
)
var log = logf.Log.WithName("fallback")
-func isFallbackEnabled(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) bool {
- if scaledObject.Spec.Fallback == nil {
+const healthStr string = "health"
+const externalCalculatorStr string = "externalcalculator"
+
+// TODO: gauron99 - possibly refactor this when trying to unify status updates & fallback functionality
+func isFallbackEnabled(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec, determiner string) bool {
+ switch determiner {
+ case healthStr:
+ if scaledObject.Spec.Fallback == nil {
+ return false
+ }
+ if metricSpec.External.Target.Type != v2.AverageValueMetricType {
+ log.V(0).Info("Fallback can only be enabled for triggers with metric of type AverageValue", "scaledObject.Namespace", scaledObject.Namespace, "scaledObject.Name", scaledObject.Name)
+ return false
+ }
+ case externalCalculatorStr:
+ if scaledObject.Spec.Fallback == nil || scaledObject.Spec.Advanced.ComplexScalingLogic.Target == "" {
+ return false
+ }
+ default:
+ log.V(0).Info("Internal error in isFallbackEnabled - wrong determiner - this should never happen")
return false
}
+ return true
+}
- if metricSpec.External.Target.Type != v2.AverageValueMetricType {
- log.V(0).Info("Fallback can only be enabled for triggers with metric of type AverageValue", "scaledObject.Namespace", scaledObject.Namespace, "scaledObject.Name", scaledObject.Name)
- return false
+// TODO: gauron99 - possible refactor of fallback functionality to unify status updates
+// Possibly move external health status into the existing one with prefix like external-modifier or something.
+// Separate status updates & fallback functionality
+func GetMetricsWithFallbackExternalCalculator(ctx context.Context, client runtimeclient.Client, metrics *cl.MetricsList, suppressedError error, metricName string, scaledObject *kedav1alpha1.ScaledObject) (bool, error) {
+ const determiner string = "externalcalculator"
+ status := scaledObject.Status.DeepCopy()
+
+ initHealthStatus(status, determiner)
+
+ healthStatus := getHealthStatus(status, metricName, determiner)
+
+ if healthStatus == nil {
+ // should never be nil
+ err := fmt.Errorf("internal error getting health status in GetMetricsWithFallbackExternalCalculator - wrong determiner")
+ return false, err
}
- return true
+ // if there is no error
+ if suppressedError == nil {
+ zero := int32(0)
+ healthStatus.NumberOfFailures = &zero
+ healthStatus.Status = kedav1alpha1.HealthStatusHappy
+ status.ExternalCalculationHealth[metricName] = *healthStatus
+
+ updateStatus(ctx, client, scaledObject, status, v2.MetricSpec{})
+
+ return false, nil
+ }
+
+ healthStatus.Status = kedav1alpha1.HealthStatusFailing
+ *healthStatus.NumberOfFailures++
+ status.ExternalCalculationHealth[metricName] = *healthStatus
+ updateStatus(ctx, client, scaledObject, status, v2.MetricSpec{})
+
+ switch {
+ case !isFallbackEnabled(scaledObject, v2.MetricSpec{}, determiner):
+ return false, suppressedError
+ case !validateFallback(scaledObject):
+ log.Info("Failed to validate ScaledObject ComplexScalingLogic Fallback. Please check that parameters are positive integers", "scaledObject.Namespace", scaledObject.Namespace, "scaledObject.Name", scaledObject.Name)
+ return false, suppressedError
+ case *healthStatus.NumberOfFailures > scaledObject.Spec.Fallback.FailureThreshold:
+ doExternalCalculationFallback(scaledObject, metrics, metricName, suppressedError)
+ return true, nil
+
+ default:
+ return false, suppressedError
+ }
}
func GetMetricsWithFallback(ctx context.Context, client runtimeclient.Client, metrics []external_metrics.ExternalMetricValue, suppressedError error, metricName string, scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) ([]external_metrics.ExternalMetricValue, error) {
+ const determiner string = "health"
status := scaledObject.Status.DeepCopy()
- initHealthStatus(status)
- healthStatus := getHealthStatus(status, metricName)
+ initHealthStatus(status, determiner)
+ healthStatus := getHealthStatus(status, metricName, determiner)
+ if healthStatus == nil {
+ // should never be nil
+ err := fmt.Errorf("internal error getting health status in GetMetricsWithFallback - wrong determiner")
+ return metrics, err
+ }
if suppressedError == nil {
zero := int32(0)
@@ -67,7 +138,7 @@ func GetMetricsWithFallback(ctx context.Context, client runtimeclient.Client, me
updateStatus(ctx, client, scaledObject, status, metricSpec)
switch {
- case !isFallbackEnabled(scaledObject, metricSpec):
+ case !isFallbackEnabled(scaledObject, metricSpec, determiner):
return nil, suppressedError
case !validateFallback(scaledObject):
log.Info("Failed to validate ScaledObject Spec. Please check that parameters are positive integers", "scaledObject.Namespace", scaledObject.Namespace, "scaledObject.Name", scaledObject.Name)
@@ -79,15 +150,26 @@ func GetMetricsWithFallback(ctx context.Context, client runtimeclient.Client, me
}
}
-func fallbackExistsInScaledObject(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) bool {
- if !isFallbackEnabled(scaledObject, metricSpec) || !validateFallback(scaledObject) {
+func fallbackExistsInScaledObject(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec, determiner string) bool {
+ if !isFallbackEnabled(scaledObject, metricSpec, determiner) || !validateFallback(scaledObject) {
return false
}
- for _, element := range scaledObject.Status.Health {
- if element.Status == kedav1alpha1.HealthStatusFailing && *element.NumberOfFailures > scaledObject.Spec.Fallback.FailureThreshold {
- return true
+ switch determiner {
+ case healthStr:
+ for _, element := range scaledObject.Status.Health {
+ if element.Status == kedav1alpha1.HealthStatusFailing && *element.NumberOfFailures > scaledObject.Spec.Fallback.FailureThreshold {
+ return true
+ }
+ }
+ case externalCalculatorStr:
+ for _, element := range scaledObject.Status.ExternalCalculationHealth {
+ if element.Status == kedav1alpha1.HealthStatusFailing && *element.NumberOfFailures > scaledObject.Spec.Fallback.FailureThreshold {
+ return true
+ }
}
+ default:
+ // this should never happen
}
return false
@@ -112,13 +194,37 @@ func doFallback(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpe
return fallbackMetrics
}
+func doExternalCalculationFallback(scaledObject *kedav1alpha1.ScaledObject, metrics *cl.MetricsList, metricName string, suppressedError error) {
+ replicas := int64(scaledObject.Spec.Fallback.Replicas)
+ normalisationValue, err := strconv.ParseFloat(scaledObject.Spec.Advanced.ComplexScalingLogic.Target, 64)
+ if err != nil {
+ log.Error(err, "error converting string to float in ExternalCalculation fallback")
+ return
+ }
+ metric := cl.Metric{
+ Name: metricName,
+ Value: float32(normalisationValue * float64(replicas)),
+ }
+ metrics.MetricValues = []*cl.Metric{&metric}
+ log.Info(fmt.Sprintf("Suppressing error, externalCalculator falling back to %d fallback.replicas", scaledObject.Spec.Fallback.Replicas), "scaledObject.Namespace", scaledObject.Namespace, "scaledObject.Name", scaledObject.Name, "suppressedError", suppressedError)
+}
+
func updateStatus(ctx context.Context, client runtimeclient.Client, scaledObject *kedav1alpha1.ScaledObject, status *kedav1alpha1.ScaledObjectStatus, metricSpec v2.MetricSpec) {
patch := runtimeclient.MergeFrom(scaledObject.DeepCopy())
- if fallbackExistsInScaledObject(scaledObject, metricSpec) {
- status.Conditions.SetFallbackCondition(metav1.ConditionTrue, "FallbackExists", "At least one trigger is falling back on this scaled object")
+ // if metricSpec is empty, expect to update externalCalculator
+ if reflect.DeepEqual(metricSpec, v2.MetricSpec{}) {
+ if fallbackExistsInScaledObject(scaledObject, metricSpec, externalCalculatorStr) {
+ status.Conditions.SetExternalFallbackCondition(metav1.ConditionTrue, "ExternalFallbackExists", "At least one external calculator is failing on this scaled object")
+ } else {
+ status.Conditions.SetExternalFallbackCondition(metav1.ConditionFalse, "NoExternalFallbackFound", "No external fallbacks are active on this scaled object")
+ }
} else {
- status.Conditions.SetFallbackCondition(metav1.ConditionFalse, "NoFallbackFound", "No fallbacks are active on this scaled object")
+ if fallbackExistsInScaledObject(scaledObject, metricSpec, healthStr) {
+ status.Conditions.SetFallbackCondition(metav1.ConditionTrue, "FallbackExists", "At least one trigger is falling back on this scaled object")
+ } else {
+ status.Conditions.SetFallbackCondition(metav1.ConditionFalse, "NoFallbackFound", "No fallbacks are active on this scaled object")
+ }
}
scaledObject.Status = *status
@@ -128,24 +234,54 @@ func updateStatus(ctx context.Context, client runtimeclient.Client, scaledObject
}
}
-func getHealthStatus(status *kedav1alpha1.ScaledObjectStatus, metricName string) *kedav1alpha1.HealthStatus {
- // Get health status for a specific metric
- _, healthStatusExists := status.Health[metricName]
- if !healthStatusExists {
- zero := int32(0)
- healthStatus := kedav1alpha1.HealthStatus{
- NumberOfFailures: &zero,
- Status: kedav1alpha1.HealthStatusHappy,
+func getHealthStatus(status *kedav1alpha1.ScaledObjectStatus, metricName string, determiner string) *kedav1alpha1.HealthStatus {
+ switch determiner {
+ case healthStr:
+ // Get health status for a specific metric
+ _, healthStatusExists := status.Health[metricName]
+ if !healthStatusExists {
+ zero := int32(0)
+ healthStatus := kedav1alpha1.HealthStatus{
+ NumberOfFailures: &zero,
+ Status: kedav1alpha1.HealthStatusHappy,
+ }
+ status.Health[metricName] = healthStatus
}
- status.Health[metricName] = healthStatus
+ healthStatus := status.Health[metricName]
+ return &healthStatus
+ case externalCalculatorStr:
+ // Get health status for a specific metric
+ _, healthStatusExists := status.ExternalCalculationHealth[metricName]
+ if !healthStatusExists {
+ zero := int32(0)
+ healthStatus := kedav1alpha1.HealthStatus{
+ NumberOfFailures: &zero,
+ Status: kedav1alpha1.HealthStatusHappy,
+ }
+ status.ExternalCalculationHealth[metricName] = healthStatus
+ }
+ healthStatus := status.ExternalCalculationHealth[metricName]
+ return &healthStatus
+ default:
+ // if wrong determiner was given
+ return nil
}
- healthStatus := status.Health[metricName]
- return &healthStatus
}
-func initHealthStatus(status *kedav1alpha1.ScaledObjectStatus) {
- // Init health status if missing
- if status.Health == nil {
- status.Health = make(map[string]kedav1alpha1.HealthStatus)
+// Init health status of given structure. Possible determiners are "health" and
+// "externalcalculator". These represent (1) default health status for ScaledObjectStatus.Health
+// and (2) externalCalculators health status for ScaledObjectStatus.ExternalCalculationHealth
+func initHealthStatus(status *kedav1alpha1.ScaledObjectStatus, determiner string) {
+ // Init specific health status if missing ("health" for standard; "externalcalculator" for external calculator health)
+ switch determiner {
+ case healthStr:
+ if status.Health == nil {
+ status.Health = make(map[string]kedav1alpha1.HealthStatus)
+ }
+ case externalCalculatorStr:
+ if status.ExternalCalculationHealth == nil {
+ status.ExternalCalculationHealth = make(map[string]kedav1alpha1.HealthStatus)
+ }
+ default:
}
}
diff --git a/pkg/fallback/fallback_test.go b/pkg/fallback/fallback_test.go
index 9f10ff8aad2..62070783307 100644
--- a/pkg/fallback/fallback_test.go
+++ b/pkg/fallback/fallback_test.go
@@ -32,6 +32,8 @@ import (
"k8s.io/metrics/pkg/apis/external_metrics"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ externalscaling "github.com/kedacore/keda/v2/pkg/externalscaling"
+ externalscalingAPI "github.com/kedacore/keda/v2/pkg/externalscaling/api"
"github.com/kedacore/keda/v2/pkg/mock/mock_client"
mock_scalers "github.com/kedacore/keda/v2/pkg/mock/mock_scaler"
)
@@ -65,7 +67,7 @@ var _ = Describe("fallback", func() {
expectedMetricValue := float64(5)
primeGetMetrics(scaler, expectedMetricValue)
- so := buildScaledObject(nil, nil)
+ so := buildScaledObject(nil, nil, nil)
metricSpec := createMetricSpec(3)
expectStatusPatch(ctrl, client)
@@ -95,6 +97,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(3)
@@ -112,7 +115,7 @@ var _ = Describe("fallback", func() {
It("should propagate the error when fallback is disabled", func() {
scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error"))
- so := buildScaledObject(nil, nil)
+ so := buildScaledObject(nil, nil, nil)
metricSpec := createMetricSpec(3)
expectStatusPatch(ctrl, client)
@@ -140,6 +143,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(10)
@@ -171,6 +175,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(10)
expectStatusPatch(ctrl, client)
@@ -189,7 +194,7 @@ var _ = Describe("fallback", func() {
&kedav1alpha1.Fallback{
FailureThreshold: int32(3),
Replicas: int32(10),
- }, nil,
+ }, nil, nil,
)
qty := resource.NewQuantity(int64(3), resource.DecimalSI)
@@ -202,7 +207,7 @@ var _ = Describe("fallback", func() {
},
}
- isEnabled := isFallbackEnabled(so, metricsSpec)
+ isEnabled := isFallbackEnabled(so, metricsSpec, "health")
Expect(isEnabled).Should(BeFalse())
})
@@ -224,6 +229,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(10)
@@ -257,6 +263,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(10)
expectStatusPatch(ctrl, client)
@@ -291,6 +298,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(10)
expectStatusPatch(ctrl, client)
@@ -325,6 +333,7 @@ var _ = Describe("fallback", func() {
},
},
},
+ nil,
)
metricSpec := createMetricSpec(10)
expectStatusPatch(ctrl, client)
@@ -336,6 +345,399 @@ var _ = Describe("fallback", func() {
condition := so.Status.Conditions.GetFallbackCondition()
Expect(condition.IsTrue()).Should(BeFalse())
})
+
+ // ---------------------------------------------------------------------------
+ // fallback for ComplexScalingLogic ExternalCalculators
+ // ---------------------------------------------------------------------------
+
+ // --- set condition to false ---
+ // invalid FailureThreshold eg. < 0
+ It("should set the ec-fallback condition to false if the Fallback FailureThreshold is invalid", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ wrongThreshold := int32(-2)
+
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: "2",
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: wrongThreshold,
+ Replicas: int32(10),
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "")
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).Should(BeNil())
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeFalse())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("NoExternalFallbackFound"))
+ })
+
+ It("should set the ec-fallback condition to false when a ec-fallback disabled", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ so := buildScaledObject(
+ nil,
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ nil,
+ )
+
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "")
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).Should(BeNil())
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeFalse())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("NoExternalFallbackFound"))
+ })
+
+ It("should set ec-fallback condition to false when the number of replicas is invalid", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ threshold := int32(2)
+ replicas := int32(-1)
+
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: "2",
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "")
+
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).Should(BeNil())
+
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeFalse())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("NoExternalFallbackFound"))
+ })
+
+ It("should set ec-fallback condition to false when all is valid but no error exists", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ threshold := int32(2)
+ replicas := int32(3)
+
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: "2",
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "")
+
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).Should(BeNil())
+
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeFalse())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("NoExternalFallbackFound"))
+ })
+ It("should set ec-fallback condition to false when err exists but config is invalid", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ threshold := int32(-2) // invalid
+ replicas := int32(3)
+
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: "2",
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "err in external calculation")
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).ShouldNot(BeNil())
+
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeFalse())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("NoExternalFallbackFound"))
+ })
+
+ It("should set ec-fallback condition to false when config valid, err exists but threshold not reached, metric didnt change", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ threshold := int32(2)
+ replicas := int32(3)
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: "2",
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "err in external calculation")
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).ShouldNot(BeNil())
+
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeFalse())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("NoExternalFallbackFound"))
+ })
+ // --- set condition to true ---
+ It("should set ec-fallback condition to true when config is valid and err exists & return valid metric", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(3)
+ threshold := int32(2)
+ replicas := int32(3)
+ target := "2"
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: target,
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusFailing,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "err in external calculation")
+
+ expectStatusPatch(ctrl, client)
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeTrue())
+ Expect(err).Should(BeNil())
+
+ // check condition
+ condition := so.Status.Conditions.GetExternalFallbackCondition()
+ Expect(condition.IsTrue()).Should(BeTrue())
+ Expect(condition.Type).Should(Equal(kedav1alpha1.ConditionExternalFallback))
+ Expect(condition.Reason).Should(Equal("ExternalFallbackExists"))
+
+ // check metric
+ Expect(convertedMetrics).ShouldNot(BeNil())
+ Expect(convertedMetrics.MetricValues).Should(HaveLen(1))
+ Expect(convertedMetrics.MetricValues[0].Value).Should(Equal(float32(replicas * 2)))
+ })
+
+ // It("should return a ec-fallback metric when number of failures are beyond threshold", func() {
+ // })
+
+ It("should bump the number of failures when calculation call fails", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(0)
+ threshold := int32(3)
+ replicas := int32(3)
+ target := "2"
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: target,
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusHappy,
+ },
+ },
+ },
+ csl,
+ )
+
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "err in external calculation")
+ expectStatusPatch(ctrl, client)
+
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).ShouldNot(BeNil())
+
+ Expect(so.Status.ExternalCalculationHealth[metricName]).To(haveFailureAndStatus(1, kedav1alpha1.HealthStatusFailing))
+ })
+
+ It("should reset the health status when scaler metrics are available", func() {
+ primeGetMetrics(scaler, 2)
+ startingNumberOfFailures := int32(5)
+ threshold := int32(3)
+ replicas := int32(3)
+ target := "2"
+ csl := &kedav1alpha1.ComplexScalingLogic{
+ Formula: "",
+ Target: target,
+
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: metricName, URL: "fake-url", Timeout: "5"}},
+ }
+
+ so := buildScaledObject(
+ &kedav1alpha1.Fallback{
+ FailureThreshold: threshold,
+ Replicas: replicas,
+ },
+ &kedav1alpha1.ScaledObjectStatus{
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ metricName: {
+ NumberOfFailures: &startingNumberOfFailures,
+ Status: kedav1alpha1.HealthStatusFailing,
+ },
+ },
+ },
+ csl,
+ )
+ metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName)
+ Expect(err).Should(BeNil())
+ convertedMetrics, err := mockCalculateForExternalCalculator(metrics, "")
+ expectStatusPatch(ctrl, client)
+
+ fbApplied, err := GetMetricsWithFallbackExternalCalculator(context.Background(), client, convertedMetrics, err, metricName, so)
+
+ Expect(fbApplied).Should(BeFalse())
+ Expect(err).Should(BeNil())
+
+ Expect(so.Status.ExternalCalculationHealth[metricName]).To(haveFailureAndStatus(0, kedav1alpha1.HealthStatusHappy))
+ })
})
func haveFailureAndStatus(numberOfFailures int, status kedav1alpha1.HealthStatusType) types.GomegaMatcher {
@@ -380,7 +782,7 @@ func expectStatusPatch(ctrl *gomock.Controller, client *mock_client.MockClient)
client.EXPECT().Status().Return(statusWriter)
}
-func buildScaledObject(fallbackConfig *kedav1alpha1.Fallback, status *kedav1alpha1.ScaledObjectStatus) *kedav1alpha1.ScaledObject {
+func buildScaledObject(fallbackConfig *kedav1alpha1.Fallback, status *kedav1alpha1.ScaledObjectStatus, csl *kedav1alpha1.ComplexScalingLogic) *kedav1alpha1.ScaledObject {
scaledObject := &kedav1alpha1.ScaledObject{
ObjectMeta: metav1.ObjectMeta{Name: "clean-up-test", Namespace: "default"},
Spec: kedav1alpha1.ScaledObjectSpec{
@@ -406,6 +808,11 @@ func buildScaledObject(fallbackConfig *kedav1alpha1.Fallback, status *kedav1alph
scaledObject.Status = *status
}
+ // used for testing ec-fallback (ComplexScalingLogic.ExternalCalculators)
+ if csl != nil {
+ scaledObject.Spec.Advanced = &kedav1alpha1.AdvancedConfig{ComplexScalingLogic: *csl}
+ }
+
scaledObject.Status.Conditions = *kedav1alpha1.GetInitializedConditions()
return scaledObject
@@ -432,3 +839,19 @@ func createMetricSpec(averageValue int) v2.MetricSpec {
},
}
}
+
+// simulate a calculation for an externalCalculator metric (returns nil metrics for empty input)
+func mockCalculateForExternalCalculator(metrics []external_metrics.ExternalMetricValue, err string) (ret *externalscalingAPI.MetricsList, resultErr error) {
+ if len(metrics) > 0 {
+ ret = externalscaling.ConvertToGeneratedStruct(metrics)
+ } else {
+ ret = nil
+ }
+ if err != "" {
+ resultErr = fmt.Errorf(err)
+ } else {
+ resultErr = nil
+ }
+
+ return ret, resultErr
+}
diff --git a/pkg/mock/mock_externalscaling/mock_externalscaling.go b/pkg/mock/mock_externalscaling/mock_externalscaling.go
new file mode 100644
index 00000000000..b527a65c376
--- /dev/null
+++ b/pkg/mock/mock_externalscaling/mock_externalscaling.go
@@ -0,0 +1,142 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: pkg/externalscaling/api/externalCalculation_grpc.pb.go
+
+// Package mock_externalscaling is a generated GoMock package.
+package mock_externalscaling
+
+import (
+ context "context"
+ reflect "reflect"
+
+ gomock "github.com/golang/mock/gomock"
+ externalCalculation "github.com/kedacore/keda/v2/pkg/externalscaling/api"
+ grpc "google.golang.org/grpc"
+)
+
+// MockExternalCalculationClient is a mock of ExternalCalculationClient interface.
+type MockExternalCalculationClient struct {
+ ctrl *gomock.Controller
+ recorder *MockExternalCalculationClientMockRecorder
+}
+
+// MockExternalCalculationClientMockRecorder is the mock recorder for MockExternalCalculationClient.
+type MockExternalCalculationClientMockRecorder struct {
+ mock *MockExternalCalculationClient
+}
+
+// NewMockExternalCalculationClient creates a new mock instance.
+func NewMockExternalCalculationClient(ctrl *gomock.Controller) *MockExternalCalculationClient {
+ mock := &MockExternalCalculationClient{ctrl: ctrl}
+ mock.recorder = &MockExternalCalculationClientMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockExternalCalculationClient) EXPECT() *MockExternalCalculationClientMockRecorder {
+ return m.recorder
+}
+
+// Calculate mocks base method.
+func (m *MockExternalCalculationClient) Calculate(ctx context.Context, in *externalCalculation.MetricsList, opts ...grpc.CallOption) (*externalCalculation.Response, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{ctx, in}
+ for _, a := range opts {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "Calculate", varargs...)
+ ret0, _ := ret[0].(*externalCalculation.Response)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Calculate indicates an expected call of Calculate.
+func (mr *MockExternalCalculationClientMockRecorder) Calculate(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{ctx, in}, opts...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Calculate", reflect.TypeOf((*MockExternalCalculationClient)(nil).Calculate), varargs...)
+}
+
+// MockExternalCalculationServer is a mock of ExternalCalculationServer interface.
+type MockExternalCalculationServer struct {
+ ctrl *gomock.Controller
+ recorder *MockExternalCalculationServerMockRecorder
+}
+
+// MockExternalCalculationServerMockRecorder is the mock recorder for MockExternalCalculationServer.
+type MockExternalCalculationServerMockRecorder struct {
+ mock *MockExternalCalculationServer
+}
+
+// NewMockExternalCalculationServer creates a new mock instance.
+func NewMockExternalCalculationServer(ctrl *gomock.Controller) *MockExternalCalculationServer {
+ mock := &MockExternalCalculationServer{ctrl: ctrl}
+ mock.recorder = &MockExternalCalculationServerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockExternalCalculationServer) EXPECT() *MockExternalCalculationServerMockRecorder {
+ return m.recorder
+}
+
+// Calculate mocks base method.
+func (m *MockExternalCalculationServer) Calculate(arg0 context.Context, arg1 *externalCalculation.MetricsList) (*externalCalculation.Response, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Calculate", arg0, arg1)
+ ret0, _ := ret[0].(*externalCalculation.Response)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Calculate indicates an expected call of Calculate.
+func (mr *MockExternalCalculationServerMockRecorder) Calculate(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Calculate", reflect.TypeOf((*MockExternalCalculationServer)(nil).Calculate), arg0, arg1)
+}
+
+// mustEmbedUnimplementedExternalCalculationServer mocks base method.
+func (m *MockExternalCalculationServer) mustEmbedUnimplementedExternalCalculationServer() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "mustEmbedUnimplementedExternalCalculationServer")
+}
+
+// mustEmbedUnimplementedExternalCalculationServer indicates an expected call of mustEmbedUnimplementedExternalCalculationServer.
+func (mr *MockExternalCalculationServerMockRecorder) mustEmbedUnimplementedExternalCalculationServer() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedExternalCalculationServer", reflect.TypeOf((*MockExternalCalculationServer)(nil).mustEmbedUnimplementedExternalCalculationServer))
+}
+
+// MockUnsafeExternalCalculationServer is a mock of UnsafeExternalCalculationServer interface.
+type MockUnsafeExternalCalculationServer struct {
+ ctrl *gomock.Controller
+ recorder *MockUnsafeExternalCalculationServerMockRecorder
+}
+
+// MockUnsafeExternalCalculationServerMockRecorder is the mock recorder for MockUnsafeExternalCalculationServer.
+type MockUnsafeExternalCalculationServerMockRecorder struct {
+ mock *MockUnsafeExternalCalculationServer
+}
+
+// NewMockUnsafeExternalCalculationServer creates a new mock instance.
+func NewMockUnsafeExternalCalculationServer(ctrl *gomock.Controller) *MockUnsafeExternalCalculationServer {
+ mock := &MockUnsafeExternalCalculationServer{ctrl: ctrl}
+ mock.recorder = &MockUnsafeExternalCalculationServerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockUnsafeExternalCalculationServer) EXPECT() *MockUnsafeExternalCalculationServerMockRecorder {
+ return m.recorder
+}
+
+// mustEmbedUnimplementedExternalCalculationServer mocks base method.
+func (m *MockUnsafeExternalCalculationServer) mustEmbedUnimplementedExternalCalculationServer() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "mustEmbedUnimplementedExternalCalculationServer")
+}
+
+// mustEmbedUnimplementedExternalCalculationServer indicates an expected call of mustEmbedUnimplementedExternalCalculationServer.
+func (mr *MockUnsafeExternalCalculationServerMockRecorder) mustEmbedUnimplementedExternalCalculationServer() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedExternalCalculationServer", reflect.TypeOf((*MockUnsafeExternalCalculationServer)(nil).mustEmbedUnimplementedExternalCalculationServer))
+}
diff --git a/pkg/scaling/cache/scalers_cache.go b/pkg/scaling/cache/scalers_cache.go
index da3a0d05100..fec0f3da91b 100644
--- a/pkg/scaling/cache/scalers_cache.go
+++ b/pkg/scaling/cache/scalers_cache.go
@@ -19,6 +19,7 @@ package cache
import (
"context"
"fmt"
+ "strconv"
"time"
v2 "k8s.io/api/autoscaling/v2"
@@ -27,16 +28,24 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ externalscaling "github.com/kedacore/keda/v2/pkg/externalscaling"
"github.com/kedacore/keda/v2/pkg/scalers"
)
var log = logf.Log.WithName("scalers_cache")
type ScalersCache struct {
- ScaledObject *kedav1alpha1.ScaledObject
- Scalers []ScalerBuilder
- ScalableObjectGeneration int64
- Recorder record.EventRecorder
+ ScaledObject *kedav1alpha1.ScaledObject
+ Scalers []ScalerBuilder
+ ScalableObjectGeneration int64
+ Recorder record.EventRecorder
+ ExternalCalculationGrpcClients []ExternalCalculationClient
+}
+
+type ExternalCalculationClient struct {
+ Name string
+ Client *externalscaling.GrpcClient
+ Connected bool
}
type ScalerBuilder struct {
@@ -70,6 +79,14 @@ func (c *ScalersCache) GetPushScalers() []scalers.PushScaler {
// Close closes all scalers in the cache
func (c *ScalersCache) Close(ctx context.Context) {
+ for _, client := range c.ExternalCalculationGrpcClients {
+ err := client.Client.CloseConnection()
+ if err != nil {
+ log.Error(err, fmt.Sprintf("couldn't close grpc connection for externalCalculator '%s'", client.Name))
+ } else {
+ log.V(0).Info(fmt.Sprintf("successfully closed grpc connection for externalCalculator '%s'", client.Name))
+ }
+ }
scalers := c.Scalers
c.Scalers = nil
for _, s := range scalers {
@@ -138,6 +155,67 @@ func (c *ScalersCache) GetMetricsAndActivityForScaler(ctx context.Context, index
return metric, activity, time.Since(startTime).Milliseconds(), err
}
+// RefreshExternalCalcClientsCache tries to create clients for all
+// externalCalculators present in the ScaledObject and saves them to the cache
+// Returns the client requested by ecName if it exists.
+func (c *ScalersCache) RefreshExternalCalcClientsCache(ctx context.Context, so *kedav1alpha1.ScaledObject, ecName string) ExternalCalculationClient {
+ log.Info(fmt.Sprintf("Refreshing externalCalculation clients in cache because '%s' wasn't found in cache", ecName))
+	// this function is most likely invoked when the cache was invalidated or
+	// returned an error before the ec-client instances could be created
+
+ // close all existing connections
+ for _, client := range c.ExternalCalculationGrpcClients {
+ if client.Connected {
+ err := client.Client.CloseConnection()
+ if err != nil {
+ log.Error(err, fmt.Sprintf("couldn't close grpc connection for externalCalculator '%s'", client.Name))
+ } else {
+ log.V(0).Info(fmt.Sprintf("successfully closed grpc connection for externalCalculator '%s'", client.Name))
+ }
+ }
+ }
+
+ // create new clients
+ ret := ExternalCalculationClient{}
+ newClients := []ExternalCalculationClient{}
+ for _, ec := range so.Spec.Advanced.ComplexScalingLogic.ExternalCalculations {
+ // try to create new client instance and connect to the server
+ timeout, err := strconv.ParseInt(ec.Timeout, 10, 64)
+ if err != nil {
+ // expect timeout in time format like 1m10s
+ parsedTime, err := time.ParseDuration(ec.Timeout)
+ if err != nil {
+ log.Error(err, "error while converting type of timeout for external calculator")
+ break
+ }
+ timeout = int64(parsedTime.Seconds())
+ }
+ ecClient, err := externalscaling.NewGrpcClient(ec.URL, ec.CertificateDirectory)
+ var connected bool
+ if err != nil {
+ log.Error(err, fmt.Sprintf("error creating new grpc client for external calculator at %s", ec.URL))
+ } else {
+ if !ecClient.WaitForConnectionReady(ctx, ec.URL, time.Duration(timeout)*time.Second, log) {
+ connected = false
+ err = fmt.Errorf("client failed to connect to server")
+ log.Error(err, fmt.Sprintf("error in creating gRPC connection for external calculator '%s' via '%s'", ec.Name, ec.URL))
+ } else {
+ connected = true
+ log.Info(fmt.Sprintf("successfully connected to gRPC server ExternalCalculator '%s' at '%s'", ec.Name, ec.URL))
+ }
+ }
+ ecClientStruct := ExternalCalculationClient{Name: ec.Name, Client: ecClient, Connected: connected}
+ // match current one to return
+ if ecClientStruct.Name == ecName {
+ ret = ecClientStruct
+ }
+ newClients = append(newClients, ecClientStruct)
+ }
+ // save to cache
+ c.ExternalCalculationGrpcClients = newClients
+ return ret
+}
+
func (c *ScalersCache) refreshScaler(ctx context.Context, id int) (scalers.Scaler, error) {
if id < 0 || id >= len(c.Scalers) {
return nil, fmt.Errorf("scaler with id %d not found, len = %d, cache has been probably already invalidated", id, len(c.Scalers))
diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go
index 649d19fd1a7..fb4f7bd8504 100644
--- a/pkg/scaling/scale_handler.go
+++ b/pkg/scaling/scale_handler.go
@@ -18,12 +18,18 @@ package scaling
import (
"context"
+ "errors"
"fmt"
+ "reflect"
+ "strconv"
"strings"
"sync"
"time"
+ expr "github.com/antonmedv/expr"
+ "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
corev1listers "k8s.io/client-go/listers/core/v1"
@@ -35,6 +41,8 @@ import (
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
"github.com/kedacore/keda/v2/pkg/eventreason"
+ externalscaling "github.com/kedacore/keda/v2/pkg/externalscaling"
+ externalscalingAPI "github.com/kedacore/keda/v2/pkg/externalscaling/api"
"github.com/kedacore/keda/v2/pkg/fallback"
"github.com/kedacore/keda/v2/pkg/prommetrics"
"github.com/kedacore/keda/v2/pkg/scalers"
@@ -357,6 +365,47 @@ func (h *scaleHandler) performGetScalersCache(ctx context.Context, key string, s
return nil, err
}
+ externalCalculationClients := []cache.ExternalCalculationClient{}
+
+	// if scalableObject is a ScaledObject, check for External Calculators and establish
+ // their connections to gRPC servers and save the client instances
+ // new: scaledObject is sometimes NOT updated due to unresolved issue (more info https://github.com/kedacore/keda/issues/4389)
+ switch val := scalableObject.(type) {
+ case *kedav1alpha1.ScaledObject:
+ if val.Spec.Advanced != nil {
+ for _, ec := range val.Spec.Advanced.ComplexScalingLogic.ExternalCalculations {
+ timeout, err := strconv.ParseInt(ec.Timeout, 10, 64)
+ if err != nil {
+ // expect timeout in time format like 1m10s
+ parsedTime, err := time.ParseDuration(ec.Timeout)
+ if err != nil {
+ log.Error(err, "error while converting type of timeout for external calculator")
+ break
+ }
+ timeout = int64(parsedTime.Seconds())
+ }
+ ecClient, err := externalscaling.NewGrpcClient(ec.URL, ec.CertificateDirectory)
+
+ var connected bool
+ if err != nil {
+ log.Error(err, fmt.Sprintf("error creating new grpc client for external calculator at %s", ec.URL))
+ } else {
+ if !ecClient.WaitForConnectionReady(ctx, ec.URL, time.Duration(timeout)*time.Second, log) {
+ connected = false
+ err = fmt.Errorf("client failed to connect to server")
+ log.Error(err, fmt.Sprintf("error in creating gRPC connection for external calculator '%s' via '%s'", ec.Name, ec.URL))
+ } else {
+ connected = true
+ log.Info(fmt.Sprintf("successfully connected to gRPC server ExternalCalculator '%s' at '%s'", ec.Name, ec.URL))
+ }
+ }
+ ecClientStruct := cache.ExternalCalculationClient{Name: ec.Name, Client: ecClient, Connected: connected}
+ externalCalculationClients = append(externalCalculationClients, ecClientStruct)
+ }
+ }
+ default:
+ }
+
newCache := &cache.ScalersCache{
Scalers: scalers,
ScalableObjectGeneration: withTriggers.Generation,
@@ -365,11 +414,11 @@ func (h *scaleHandler) performGetScalersCache(ctx context.Context, key string, s
switch obj := scalableObject.(type) {
case *kedav1alpha1.ScaledObject:
newCache.ScaledObject = obj
+ newCache.ExternalCalculationGrpcClients = externalCalculationClients
default:
}
h.scalerCaches[key] = newCache
-
return h.scalerCaches[key], nil
}
@@ -401,9 +450,8 @@ func (h *scaleHandler) ClearScalersCache(ctx context.Context, scalableObject int
// GetScaledObjectMetrics returns metrics for specified metric name for a ScaledObject identified by its name and namespace.
// It could either query the metric value directly from the scaler or from a cache, that's being stored for the scaler.
-func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, error) {
+func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricsName string) (*external_metrics.ExternalMetricValueList, error) {
logger := log.WithValues("scaledObject.Namespace", scaledObjectNamespace, "scaledObject.Name", scaledObjectName)
-
var matchingMetrics []external_metrics.ExternalMetricValue
cache, err := h.getScalersCacheForScaledObject(ctx, scaledObjectName, scaledObjectNamespace)
@@ -421,10 +469,17 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
logger.Error(err, "scaledObject not found in the cache")
return nil, err
}
-
isScalerError := false
scaledObjectIdentifier := scaledObject.GenerateIdentifier()
+ // returns all relevant metrics for current scaler (standard is one metric,
+ // composite scaler gets all external metrics for further computation)
+ metricsArray, err := h.getTrueMetricArray(ctx, metricsName, scaledObject)
+ if err != nil {
+ logger.Error(err, "error getting true metrics array, probably because of invalid cache")
+ }
+ metricTriggerPairList := make(map[string]string)
+
// let's check metrics for all scalers in a ScaledObject
scalers, scalerConfigs := cache.GetScalers()
for scalerIndex := 0; scalerIndex < len(scalers); scalerIndex++ {
@@ -440,14 +495,31 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error())
}
+ if len(metricsArray) == 0 {
+ err = fmt.Errorf("no metrics found getting metricsArray array %s", metricsName)
+ logger.Error(err, "error metricsArray is empty")
+ // TODO: add cache.Recorder?
+ }
+
for _, spec := range metricSpecs {
// skip cpu/memory resource scaler
if spec.External == nil {
continue
}
- // Filter only the desired metric
- if strings.EqualFold(spec.External.Metric.Name, metricName) {
+ // Filter only the desired metric or if composite scaler is active,
+ // metricsArray contains all external metrics
+ if arrayContainsElement(spec.External.Metric.Name, metricsArray) {
+ // if compositeScaler is used, override with current metric, otherwise do nothing
+ metricName := spec.External.Metric.Name
+
+ // Pair metric values with its trigger names. This is applied only when
+ // ComplexScalingLogic.Formula is defined in SO.
+ metricTriggerPairList, err = addPairTriggerAndMetric(metricTriggerPairList, scaledObject, metricName, scalerConfigs[scalerIndex].TriggerName)
+ if err != nil {
+ logger.Error(err, "error pairing triggers & metrics for compositeScaler")
+ }
+
var metrics []external_metrics.ExternalMetricValue
// if cache is defined for this scaler/metric, let's try to hit it first
@@ -499,9 +571,11 @@ func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectN
}
if len(matchingMetrics) == 0 {
- return nil, fmt.Errorf("no matching metrics found for " + metricName)
+ return nil, fmt.Errorf("no matching metrics found for " + metricsName)
}
+ // handle complexScalingLogic here and simply return the matchingMetrics
+ matchingMetrics = h.handleComplexScalingLogic(ctx, scaledObject, matchingMetrics, metricTriggerPairList, cache, logger)
return &external_metrics.ExternalMetricValueList{
Items: matchingMetrics,
}, nil
@@ -680,3 +754,196 @@ func (h *scaleHandler) isScaledJobActive(ctx context.Context, scaledJob *kedav1a
logger.V(1).WithValues("ScaledJob", scaledJob.Name).Info("Checking if ScaleJob Scalers are active", "isActive", isActive, "maxValue", maxFloatValue, "MultipleScalersCalculation", scaledJob.Spec.ScalingStrategy.MultipleScalersCalculation)
return isActive, queueLength, maxValue
}
+
+// getTrueMetricArray is a helper function for the composite scaler that determines
+// which metrics should be used. In the composite scaler case (ComplexScalingLogic struct),
+// all external metrics are returned; otherwise it returns an array containing
+// only the given metric.
+func (h *scaleHandler) getTrueMetricArray(ctx context.Context, metricName string, so *kedav1alpha1.ScaledObject) ([]string, error) {
+ // if composite scaler is given return all external metrics
+ if so != nil && so.Spec.Advanced != nil && so.Spec.Advanced.ComplexScalingLogic.Target != "" {
+ if len(so.Status.ExternalMetricNames) == 0 {
+ scaledObject := &kedav1alpha1.ScaledObject{}
+ err := h.client.Get(ctx, types.NamespacedName{Name: so.Name, Namespace: so.Namespace}, scaledObject)
+ if err != nil {
+ log.Error(err, "failed to get ScaledObject", "name", so.Name, "namespace", so.Namespace)
+ return nil, err
+ }
+ if len(scaledObject.Status.ExternalMetricNames) == 0 {
+ err := fmt.Errorf("failed to get ScaledObject.Status.ExternalMetricNames, probably invalid ScaledObject cache")
+ log.Error(err, "failed to get ScaledObject.Status.ExternalMetricNames, probably invalid ScaledObject cache", "scaledObject.Name", scaledObject.Name, "scaledObject.Namespace", scaledObject.Namespace)
+ return nil, err
+ }
+
+ so = scaledObject
+ }
+ return so.Status.ExternalMetricNames, nil
+ }
+ return []string{metricName}, nil
+}
+
+// helper function to determine whether metricName is the expected one.
+// In the standard case, the array returned by getTrueMetricArray() contains
+// one element if it matches, or none if it doesn't.
+// In the compositeScaler case, cycle through all external metric names.
+func arrayContainsElement(el string, arr []string) bool {
+ for _, item := range arr {
+ if strings.EqualFold(item, el) {
+ return true
+ }
+ }
+ return false
+}
+
+// if the right conditions are met, try to apply the custom formula given in the SO
+func applyComplexLogicFormula(csl kedav1alpha1.ComplexScalingLogic, metrics []external_metrics.ExternalMetricValue, pairList map[string]string) ([]external_metrics.ExternalMetricValue, error) {
+ if csl.Formula != "" {
+ // add last external calculation name as a possible trigger (user can
+ // manipulate with metrics in ExternalCalculation service and it is expected
+ // to be named as the ExternalCalculation[len()-1] value)
+ if len(csl.ExternalCalculations) > 0 {
+ lastElemIndex := len(csl.ExternalCalculations) - 1
+ lastElem := csl.ExternalCalculations[lastElemIndex].Name
+ // expect last element of external calculation array via its name
+ pairList[lastElem] = lastElem
+ }
+ metrics, err := calculateComplexLogicFormula(metrics, csl.Formula, pairList)
+ return metrics, err
+ }
+ return metrics, nil
+}
+
+// calculate custom formula to metrics and return calculated and finalized metric
+func calculateComplexLogicFormula(list []external_metrics.ExternalMetricValue, formula string, pairList map[string]string) ([]external_metrics.ExternalMetricValue, error) {
+ var ret external_metrics.ExternalMetricValue
+ var out float64
+ ret.MetricName = "composite-metric-name"
+ ret.Timestamp = v1.Now()
+
+ // using https://github.com/antonmedv/expr to evaluate formula expression
+ data := make(map[string]float64)
+ for _, v := range list {
+ data[pairList[v.MetricName]] = v.Value.AsApproximateFloat64()
+ }
+ program, err := expr.Compile(formula)
+ if err != nil {
+ return nil, fmt.Errorf("error trying to compile custom formula: %w", err)
+ }
+
+ tmp, err := expr.Run(program, data)
+ if err != nil {
+ return nil, fmt.Errorf("error trying to run custom formula: %w", err)
+ }
+
+ out = tmp.(float64)
+ ret.Value.SetMilli(int64(out * 1000))
+ return []external_metrics.ExternalMetricValue{ret}, nil
+}
+
+// Add a trigger-metric pair to the trigger-metrics list for the custom formula. The trigger name is used in
+// the formula itself (in the SO) and the metric name is used for its value internally.
+func addPairTriggerAndMetric(list map[string]string, so *kedav1alpha1.ScaledObject, metric string, trigger string) (map[string]string, error) {
+ if so.Spec.Advanced != nil && so.Spec.Advanced.ComplexScalingLogic.Formula != "" {
+ if trigger == "" {
+ return list, fmt.Errorf("trigger name not given with compositeScaler for metric %s", metric)
+ }
+
+ triggerHasMetrics := 0
+ // count number of metrics per trigger
+ for _, t := range list {
+ if strings.HasPrefix(t, trigger) {
+ triggerHasMetrics++
+ }
+ }
+
 + // if the trigger doesn't have a pair yet
+ if triggerHasMetrics == 0 {
+ list[metric] = trigger
+ } else {
 + // if the trigger already has a pair, append a number to make the name unique
+ list[metric] = fmt.Sprintf("%s%02d", trigger, triggerHasMetrics)
+ }
+
+ return list, nil
+ }
+ return map[string]string{}, nil
+}
+
+// getECClientFromCache returns the ExternalCalculationClient matching ecName from the
+// cached clients array, refreshing the cache if no matching client is found
+func getECClientFromCache(ctx context.Context, ecName string, c *cache.ScalersCache, so *kedav1alpha1.ScaledObject) cache.ExternalCalculationClient {
+ ret := cache.ExternalCalculationClient{}
+ found := false
+ for _, ecClient := range c.ExternalCalculationGrpcClients {
+ if ecClient.Name == ecName {
+ found = true
+ ret = ecClient
+ break
+ }
+ }
+ if !found {
 + // didn't find a cached client matching the externalCalculator; try refreshing the cache
+ ret = c.RefreshExternalCalcClientsCache(ctx, so, ecName)
+ }
+
+ return ret
+}
+
+// callCalculate checks whether connection is established and calls grpc method Calculate
+// for given externalCalculator. Returns metricsList and collected errors if any
+func callCalculate(ctx context.Context, ecClient cache.ExternalCalculationClient, list *externalscalingAPI.MetricsList) (*externalscalingAPI.MetricsList, error) {
+ var err error
+ if ecClient.Connected {
+ list, err = ecClient.Client.Calculate(ctx, list)
+ } else {
+ err = fmt.Errorf("trying to call method Calculate for '%s' externalCalculator when not connected", ecClient.Name)
+ }
+ return list, err
+}
+
+// apply all defined ComplexScalingLogic structures (externalCalculators/formula)
+// and simply return calculated metrics
+func (h *scaleHandler) handleComplexScalingLogic(ctx context.Context,
+ so *kedav1alpha1.ScaledObject, metrics []external_metrics.ExternalMetricValue,
+ metricTriggerList map[string]string, cacheObj *cache.ScalersCache, log logr.Logger) []external_metrics.ExternalMetricValue {
+ var err error
+ // if ComplexScalingLogic structure for externalCalculator/formula is defined, let's apply it
+ if so != nil && so.Spec.Advanced != nil && !reflect.DeepEqual(so.Spec.Advanced.ComplexScalingLogic, kedav1alpha1.ComplexScalingLogic{}) {
+ csl := so.Spec.Advanced.ComplexScalingLogic
+
+ // apply externalCalculators if defined
+ if len(csl.ExternalCalculations) > 0 {
+ grpcMetricList := externalscaling.ConvertToGeneratedStruct(metrics)
+
+ // Apply external calculations - call gRPC server on each url and return
+ // modified metric list in order
+ for _, ec := range csl.ExternalCalculations {
+ // get client's instance from cache
+ ecCacheClient := getECClientFromCache(ctx, ec.Name, cacheObj, so)
+ // attempt to connect to the gRPC server and call its method Calculate
+ grpcMetricList, err = callCalculate(ctx, ecCacheClient, grpcMetricList)
+ log.V(1).Info(fmt.Sprintf("metricsList after calling Calculate for externalCalculator '%s': %v", ec.Name, grpcMetricList))
+ if grpcMetricList == nil {
+ grpcMetricList = &externalscalingAPI.MetricsList{}
+ err = errors.Join(err, fmt.Errorf("grpc method Calculate returned nil metric list for externalCalculator"))
+ }
+ // externalCalculator fallback logic
+ fallbackApplied, err := fallback.GetMetricsWithFallbackExternalCalculator(ctx, h.client, grpcMetricList, err, ec.Name, so)
+ if err != nil {
+ log.Error(err, fmt.Sprintf("error remained after trying to apply fallback metrics for externalCalculator '%s'", ec.Name))
+ break
+ }
+ if fallbackApplied {
+ break
+ }
+ }
+ metrics = externalscaling.ConvertFromGeneratedStruct(grpcMetricList)
+ }
+ // apply formula if defined
+ metrics, err = applyComplexLogicFormula(csl, metrics, metricTriggerList)
+ if err != nil {
+ log.Error(err, "error applying custom compositeScaler formula")
+ }
+ }
+ return metrics
+}
diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go
index b10a8dab183..5d48d91ed71 100644
--- a/pkg/scaling/scale_handler_test.go
+++ b/pkg/scaling/scale_handler_test.go
@@ -35,7 +35,10 @@ import (
"k8s.io/metrics/pkg/apis/external_metrics"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ externalscaling "github.com/kedacore/keda/v2/pkg/externalscaling"
+ ec "github.com/kedacore/keda/v2/pkg/externalscaling/api"
"github.com/kedacore/keda/v2/pkg/mock/mock_client"
+ mock_ec "github.com/kedacore/keda/v2/pkg/mock/mock_externalscaling"
mock_scalers "github.com/kedacore/keda/v2/pkg/mock/mock_scaler"
"github.com/kedacore/keda/v2/pkg/mock/mock_scaling/mock_executor"
"github.com/kedacore/keda/v2/pkg/scalers"
@@ -43,9 +46,13 @@ import (
"github.com/kedacore/keda/v2/pkg/scaling/cache/metricscache"
)
+const testNamespaceGlobal = "testNamespace"
+const compositeMetricNameGlobal = "composite-metric-name"
+const testNameGlobal = "testName"
+
func TestGetScaledObjectMetrics_DirectCall(t *testing.T) {
- scaledObjectName := "testName"
- scaledObjectNamespace := "testNamespace"
+ scaledObjectName := testNameGlobal
+ scaledObjectNamespace := testNamespaceGlobal
metricName := "test-metric-name"
longPollingInterval := int32(300)
@@ -616,6 +623,378 @@ func createScaler(ctrl *gomock.Controller, queueLength int64, averageValue int64
return scaler
}
+// -----------------------------------------------------------------------------
+// test for complexScalingLogic formula & external calculators
+// -----------------------------------------------------------------------------
+
+const triggerName1 = "trigger_one"
+const triggerName2 = "trigger_two"
+const metricName1 = "metric_one"
+const metricName2 = "metric_two"
+
+func TestComplexScalingFormula(t *testing.T) {
+ scaledObjectName := testNameGlobal
+ scaledObjectNamespace := testNamespaceGlobal
+ compositeMetricName := compositeMetricNameGlobal
+
+ ctrl := gomock.NewController(t)
+ recorder := record.NewFakeRecorder(1)
+ mockClient := mock_client.NewMockClient(ctrl)
+ mockExecutor := mock_executor.NewMockScaleExecutor(ctrl)
+ mockStatusWriter := mock_client.NewMockStatusWriter(ctrl)
+
+ metricsSpecs1 := []v2.MetricSpec{createMetricSpec(2, metricName1)}
+ metricsSpecs2 := []v2.MetricSpec{createMetricSpec(5, metricName2)}
+ metricValue1 := scalers.GenerateMetricInMili(metricName1, float64(2))
+ metricValue2 := scalers.GenerateMetricInMili(metricName2, float64(5))
+
+ scaler1 := mock_scalers.NewMockScaler(ctrl)
+ scaler2 := mock_scalers.NewMockScaler(ctrl)
 + // don't use cached metrics
+ scalerConfig1 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName1, ScalerIndex: 0}
+ scalerConfig2 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName2, ScalerIndex: 1}
+ factory1 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
+ return scaler1, &scalerConfig1, nil
+ }
+ factory2 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
+ return scaler2, &scalerConfig2, nil
+ }
+
+ scaledObject := kedav1alpha1.ScaledObject{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: scaledObjectName,
+ Namespace: scaledObjectNamespace,
+ },
+ Spec: kedav1alpha1.ScaledObjectSpec{
+ ScaleTargetRef: &kedav1alpha1.ScaleTarget{
+ Name: "test",
+ },
+ Advanced: &kedav1alpha1.AdvancedConfig{
+ ComplexScalingLogic: kedav1alpha1.ComplexScalingLogic{
+ Target: "2",
+ Formula: fmt.Sprintf("%s + %s", triggerName1, triggerName2),
+ },
+ },
+ Triggers: []kedav1alpha1.ScaleTriggers{
+ {Name: triggerName1, Type: "fake_trig1"},
+ {Name: triggerName2, Type: "fake_trig2"},
+ },
+ },
+ Status: kedav1alpha1.ScaledObjectStatus{
+ ScaleTargetGVKR: &kedav1alpha1.GroupVersionKindResource{
+ Group: "apps",
+ Kind: "Deployment",
+ },
+ ExternalMetricNames: []string{metricName1, metricName2},
+ },
+ }
+
+ scalerCache := cache.ScalersCache{
+ ScaledObject: &scaledObject,
+ Scalers: []cache.ScalerBuilder{{
+ Scaler: scaler1,
+ ScalerConfig: scalerConfig1,
+ Factory: factory1,
+ },
+ {
+ Scaler: scaler2,
+ ScalerConfig: scalerConfig2,
+ Factory: factory2,
+ },
+ },
+ Recorder: recorder,
+ }
+
+ caches := map[string]*cache.ScalersCache{}
+ caches[scaledObject.GenerateIdentifier()] = &scalerCache
+
+ sh := scaleHandler{
+ client: mockClient,
+ scaleLoopContexts: &sync.Map{},
+ scaleExecutor: mockExecutor,
+ globalHTTPTimeout: time.Duration(1000),
+ recorder: recorder,
+ scalerCaches: caches,
+ scalerCachesLock: &sync.RWMutex{},
+ scaledObjectsMetricCache: metricscache.NewMetricsCache(),
+ }
+
+ mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ scaler1.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs1)
+ scaler2.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs2)
+ scaler1.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ scaler2.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ mockExecutor.EXPECT().RequestScale(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
+ sh.checkScalers(context.TODO(), &scaledObject, &sync.RWMutex{})
+
+ mockClient.EXPECT().Status().Return(mockStatusWriter).Times(2)
+ mockStatusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2)
+ scaler1.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs1)
+ scaler2.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs2)
+ scaler1.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ scaler2.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ metrics, err := sh.GetScaledObjectMetrics(context.TODO(), scaledObjectName, scaledObjectNamespace, compositeMetricName)
+ assert.Nil(t, err)
+ assert.Equal(t, float64(7), metrics.Items[0].Value.AsApproximateFloat64())
+}
+
+func TestComplexScalingExternalCalculator(t *testing.T) {
+ scaledObjectName := testNameGlobal
+ scaledObjectNamespace := testNamespaceGlobal
+ compositeMetricName := compositeMetricNameGlobal
+
+ ctrl := gomock.NewController(t)
+ recorder := record.NewFakeRecorder(1)
+ mockClient := mock_client.NewMockClient(ctrl)
+ mockExecutor := mock_executor.NewMockScaleExecutor(ctrl)
+ mockStatusWriter := mock_client.NewMockStatusWriter(ctrl)
+
+ metricsSpecs1 := []v2.MetricSpec{createMetricSpec(2, metricName1)}
+ metricsSpecs2 := []v2.MetricSpec{createMetricSpec(8, metricName2)}
+ metricValue1 := scalers.GenerateMetricInMili(metricName1, float64(2))
+ metricValue2 := scalers.GenerateMetricInMili(metricName2, float64(8))
+
+ scaler1 := mock_scalers.NewMockScaler(ctrl)
+ scaler2 := mock_scalers.NewMockScaler(ctrl)
 + // don't use cached metrics
+ scalerConfig1 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName1, ScalerIndex: 0}
+ scalerConfig2 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName2, ScalerIndex: 1}
+ factory1 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
+ return scaler1, &scalerConfig1, nil
+ }
+ factory2 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
+ return scaler2, &scalerConfig2, nil
+ }
+
+ ecClient := mock_ec.NewMockExternalCalculationClient(ctrl)
+ expectOutList := ec.MetricsList{MetricValues: []*ec.Metric{{Name: "fake_calc", Value: 5}}}
+
+ scaledObject := kedav1alpha1.ScaledObject{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: scaledObjectName,
+ Namespace: scaledObjectNamespace,
+ },
+ Spec: kedav1alpha1.ScaledObjectSpec{
+ ScaleTargetRef: &kedav1alpha1.ScaleTarget{
+ Name: "test",
+ },
+ Advanced: &kedav1alpha1.AdvancedConfig{
+ ComplexScalingLogic: kedav1alpha1.ComplexScalingLogic{
+ Target: "2",
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: "fake_calc", URL: "fake_url", Timeout: "10s"},
+ },
+ },
+ },
+ Triggers: []kedav1alpha1.ScaleTriggers{
+ {Name: triggerName1, Type: "fake_trig1"},
+ {Name: triggerName2, Type: "fake_trig2"},
+ },
+ },
+ Status: kedav1alpha1.ScaledObjectStatus{
+ ScaleTargetGVKR: &kedav1alpha1.GroupVersionKindResource{
+ Group: "apps",
+ Kind: "Deployment",
+ },
+ ExternalMetricNames: []string{metricName1, metricName2},
+ },
+ }
+
+ scalerCache := cache.ScalersCache{
+ ScaledObject: &scaledObject,
+ Scalers: []cache.ScalerBuilder{{
+ Scaler: scaler1,
+ ScalerConfig: scalerConfig1,
+ Factory: factory1,
+ },
+ {
+ Scaler: scaler2,
+ ScalerConfig: scalerConfig2,
+ Factory: factory2,
+ },
+ },
+ Recorder: recorder,
+ ExternalCalculationGrpcClients: []cache.ExternalCalculationClient{
+ {Name: "fake_calc", Client: &externalscaling.GrpcClient{Client: ecClient}, Connected: true},
+ },
+ }
+
+ caches := map[string]*cache.ScalersCache{}
+ caches[scaledObject.GenerateIdentifier()] = &scalerCache
+
+ sh := scaleHandler{
+ client: mockClient,
+ scaleLoopContexts: &sync.Map{},
+ scaleExecutor: mockExecutor,
+ globalHTTPTimeout: time.Duration(1000),
+ recorder: recorder,
+ scalerCaches: caches,
+ scalerCachesLock: &sync.RWMutex{},
+ scaledObjectsMetricCache: metricscache.NewMetricsCache(),
+ }
+
+ mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ scaler1.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs1)
+ scaler2.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs2)
+ scaler1.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ scaler2.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ mockExecutor.EXPECT().RequestScale(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
+ sh.checkScalers(context.TODO(), &scaledObject, &sync.RWMutex{})
+
+ mockClient.EXPECT().Status().Return(mockStatusWriter).Times(3)
+ mockStatusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(3)
+ scaler1.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs1)
+ scaler2.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs2)
+ scaler1.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ scaler2.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+
+ // return value of expectOutList for externalCalculator
+ ecClient.EXPECT().Calculate(context.TODO(), gomock.Any()).Return(&ec.Response{List: &expectOutList, Error: ""}, nil).Times(2)
+
+ clRes, err := ecClient.Calculate(context.Background(), &ec.MetricsList{MetricValues: []*ec.Metric{{Name: "one", Value: 2}, {Name: "two", Value: 8}}})
+ assert.Nil(t, err)
+ assert.Equal(t, 5, int(clRes.List.MetricValues[0].Value))
+
+ metrics, err := sh.GetScaledObjectMetrics(context.TODO(), scaledObjectName, scaledObjectNamespace, compositeMetricName)
+ assert.Nil(t, err)
+ assert.Equal(t, float64(5), metrics.Items[0].Value.AsApproximateFloat64())
+}
+
+// test external calculator fallback logic in GetScaledObjectMetrics
+func TestComplexScalingExternalCalculatorFallback(t *testing.T) {
+ scaledObjectName := testNameGlobal
+ scaledObjectNamespace := testNamespaceGlobal
+ compositeMetricName := compositeMetricNameGlobal
+
+ ctrl := gomock.NewController(t)
+ recorder := record.NewFakeRecorder(1)
+ mockClient := mock_client.NewMockClient(ctrl)
+ mockExecutor := mock_executor.NewMockScaleExecutor(ctrl)
+ mockStatusWriter := mock_client.NewMockStatusWriter(ctrl)
+
+ metricsSpecs1 := []v2.MetricSpec{createMetricSpec(2, metricName1)}
+ metricsSpecs2 := []v2.MetricSpec{createMetricSpec(8, metricName2)}
+ metricValue1 := scalers.GenerateMetricInMili(metricName1, float64(2))
+ metricValue2 := scalers.GenerateMetricInMili(metricName2, float64(8))
+
+ scaler1 := mock_scalers.NewMockScaler(ctrl)
+ scaler2 := mock_scalers.NewMockScaler(ctrl)
 + // don't use cached metrics
+ scalerConfig1 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName1, ScalerIndex: 0}
+ scalerConfig2 := scalers.ScalerConfig{TriggerUseCachedMetrics: false, TriggerName: triggerName2, ScalerIndex: 1}
+ factory1 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
+ return scaler1, &scalerConfig1, nil
+ }
+ factory2 := func() (scalers.Scaler, *scalers.ScalerConfig, error) {
+ return scaler2, &scalerConfig2, nil
+ }
+
+ ecClient := mock_ec.NewMockExternalCalculationClient(ctrl)
+ numOfFailures := int32(5)
+
+ scaledObject := kedav1alpha1.ScaledObject{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: scaledObjectName,
+ Namespace: scaledObjectNamespace,
+ },
+ Spec: kedav1alpha1.ScaledObjectSpec{
+ ScaleTargetRef: &kedav1alpha1.ScaleTarget{
+ Name: "test",
+ },
+ Advanced: &kedav1alpha1.AdvancedConfig{
+ ComplexScalingLogic: kedav1alpha1.ComplexScalingLogic{
+ Target: "2",
+ ExternalCalculations: []kedav1alpha1.ExternalCalculation{
+ {Name: "fake_calc", URL: "fake_url", Timeout: "10s"},
+ },
+ },
+ },
+ Fallback: &kedav1alpha1.Fallback{
+ FailureThreshold: 3,
+ Replicas: int32(6),
+ },
+ Triggers: []kedav1alpha1.ScaleTriggers{
+ {Name: triggerName1, Type: "fake_trig1"},
+ {Name: triggerName2, Type: "fake_trig2"},
+ },
+ },
+ Status: kedav1alpha1.ScaledObjectStatus{
+ ScaleTargetGVKR: &kedav1alpha1.GroupVersionKindResource{
+ Group: "apps",
+ Kind: "Deployment",
+ },
+ ExternalMetricNames: []string{metricName1, metricName2},
+ ExternalCalculationHealth: map[string]kedav1alpha1.HealthStatus{
+ "fake_calc": {
+ NumberOfFailures: &numOfFailures,
+ Status: kedav1alpha1.HealthStatusFailing,
+ },
+ },
+ },
+ }
+
+ scalerCache := cache.ScalersCache{
+ ScaledObject: &scaledObject,
+ Scalers: []cache.ScalerBuilder{{
+ Scaler: scaler1,
+ ScalerConfig: scalerConfig1,
+ Factory: factory1,
+ },
+ {
+ Scaler: scaler2,
+ ScalerConfig: scalerConfig2,
+ Factory: factory2,
+ },
+ },
+ Recorder: recorder,
+ ExternalCalculationGrpcClients: []cache.ExternalCalculationClient{
+ {Name: "fake_calc", Client: &externalscaling.GrpcClient{Client: ecClient}, Connected: true},
+ },
+ }
+
+ caches := map[string]*cache.ScalersCache{}
+ caches[scaledObject.GenerateIdentifier()] = &scalerCache
+
+ sh := scaleHandler{
+ client: mockClient,
+ scaleLoopContexts: &sync.Map{},
+ scaleExecutor: mockExecutor,
+ globalHTTPTimeout: time.Duration(1000),
+ recorder: recorder,
+ scalerCaches: caches,
+ scalerCachesLock: &sync.RWMutex{},
+ scaledObjectsMetricCache: metricscache.NewMetricsCache(),
+ }
+
+ mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ scaler1.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs1)
+ scaler2.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs2)
+ scaler1.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ scaler2.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ mockExecutor.EXPECT().RequestScale(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
+ sh.checkScalers(context.TODO(), &scaledObject, &sync.RWMutex{})
+
+ mockClient.EXPECT().Status().Return(mockStatusWriter).Times(3)
+ mockStatusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(3)
+ scaler1.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs1)
+ scaler2.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs2)
+ scaler1.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+ scaler2.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue1, metricValue2}, true, nil)
+
+ // return error in calculate for externalCalculator
+ ecClient.EXPECT().Calculate(context.TODO(), gomock.Any()).Return(&ec.Response{List: nil, Error: "error in calculate"}, fmt.Errorf("error in calculate")).Times(2)
+
+ ecRes, err := ecClient.Calculate(context.Background(), &ec.MetricsList{MetricValues: []*ec.Metric{{Name: "one", Value: 2}, {Name: "two", Value: 8}}})
+ assert.NotNil(t, err)
+ assert.Equal(t, fmt.Errorf("error in calculate"), err)
+ assert.Equal(t, "error in calculate", ecRes.Error)
+
+ metrics, err := sh.GetScaledObjectMetrics(context.TODO(), scaledObjectName, scaledObjectNamespace, compositeMetricName)
+ assert.Nil(t, err)
+ // fallback set to 6 replicas, target is 2 -> value should be 12
+ assert.Equal(t, float64(12), metrics.Items[0].Value.AsApproximateFloat64())
+}
+
// createMetricSpec creates MetricSpec for given metric name and target value.
func createMetricSpec(averageValue int64, metricName string) v2.MetricSpec {
qty := resource.NewQuantity(averageValue, resource.DecimalSI)
diff --git a/tests/internals/external_scaling/external_scaling_test.go b/tests/internals/external_scaling/external_scaling_test.go
new file mode 100644
index 00000000000..5f1bc6cde59
--- /dev/null
+++ b/tests/internals/external_scaling/external_scaling_test.go
@@ -0,0 +1,672 @@
+//go:build e2e
+// +build e2e
+
+package external_scaling_test
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+const (
+ testName = "external-scaling-test"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../.env")
+
+const (
+ serverAvgName = "server-avg"
+ serverAddName = "server-add"
+ targetAvgPort = 50051
+ targetAddPort = 50052
+)
+
+var (
+ namespace = fmt.Sprintf("%s-ns", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ metricsServerDeploymentName = fmt.Sprintf("%s-metrics-server", testName)
+ serviceName = fmt.Sprintf("%s-service", testName)
+ triggerAuthName = fmt.Sprintf("%s-ta", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ secretName = fmt.Sprintf("%s-secret", testName)
+ metricsServerEndpoint = fmt.Sprintf("http://%s.%s.svc.cluster.local:8080/api/value", serviceName, namespace)
+
+ serviceExternalAvgName = fmt.Sprintf("%s-%s-service", testName, serverAvgName)
+ serviceExternalAddName = fmt.Sprintf("%s-%s-service", testName, serverAddName)
+ podExternalAvgName = fmt.Sprintf("%s-pod", serverAvgName)
+ podExternalAddname = fmt.Sprintf("%s-pod", serverAddName)
+)
+
+type templateData struct {
+ TestNamespace string
+ DeploymentName string
+ ScaledObject string
+ TriggerAuthName string
+ SecretName string
+ ServiceName string
+ MetricsServerDeploymentName string
+ MetricsServerEndpoint string
+ MetricValue int
+
+ ServiceExternalAvgName string
+ ServiceExternalAddName string
+ PodExternalAvgName string
+ PodExternalAddname string
+ ExternalAvgPort int
+ ExternalAddPort int
+ ExternalAvgIP string
+ ExternalAddIP string
+ ServerAvgName string
+ ServerAddName string
+}
+
+const (
+ secretTemplate = `apiVersion: v1
+kind: Secret
+metadata:
+ name: {{.SecretName}}
+ namespace: {{.TestNamespace}}
+data:
+ AUTH_PASSWORD: U0VDUkVUCg==
+ AUTH_USERNAME: VVNFUgo=
+`
+
+ triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthName}}
+ namespace: {{.TestNamespace}}
+spec:
+ secretTargetRef:
+ - parameter: username
+ name: {{.SecretName}}
+ key: AUTH_USERNAME
+ - parameter: password
+ name: {{.SecretName}}
+ key: AUTH_PASSWORD
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: {{.DeploymentName}}
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+spec:
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: nginxinc/nginx-unprivileged
+ ports:
+ - containerPort: 80
+`
+
+ // for metrics-api trigger
+ metricsServerDeploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.MetricsServerDeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.MetricsServerDeploymentName}}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{.MetricsServerDeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.MetricsServerDeploymentName}}
+ spec:
+ containers:
+ - name: metrics
+ image: ghcr.io/kedacore/tests-metrics-api
+ ports:
+ - containerPort: 8080
+ envFrom:
+ - secretRef:
+ name: {{.SecretName}}
+ imagePullPolicy: Always
+`
+
+ // for SO with 2 external scaling grpc servers
+ soExternalCalculatorTwoTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObject}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ advanced:
+ complexScalingLogic:
+ target: '2'
+ externalCalculators:
+ - name: first_avg
+ url: {{.ExternalAvgIP}}:{{.ExternalAvgPort}}
+ timeout: '10s'
+ - name: second_add
+ url: {{.ExternalAddIP}}:{{.ExternalAddPort}}
+ timeout: '10s'
+ pollingInterval: 5
+ cooldownPeriod: 5
+ minReplicaCount: 0
+ maxReplicaCount: 10
+ triggers:
+ - type: metrics-api
+ name: metrics_api
+ metadata:
+ targetValue: "2"
+ url: "{{.MetricsServerEndpoint}}"
+ valueLocation: 'value'
+ method: "query"
+ authenticationRef:
+ name: {{.TriggerAuthName}}
+ - type: kubernetes-workload
+ name: kw_trig
+ metadata:
+ podSelector: pod=workload-test
+ value: '1'
+`
+
+ soFormulaTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObject}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ advanced:
+ horizontalPodAutoscalerConfig:
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 5
+ complexScalingLogic:
+ target: '2'
+ formula: metrics_api + kw_trig
+ pollingInterval: 5
+ cooldownPeriod: 5
+ minReplicaCount: 0
+ maxReplicaCount: 10
+ triggers:
+ - type: metrics-api
+ name: metrics_api
+ metadata:
+ targetValue: "2"
+ url: "{{.MetricsServerEndpoint}}"
+ valueLocation: 'value'
+ method: "query"
+ authenticationRef:
+ name: {{.TriggerAuthName}}
+ - type: kubernetes-workload
+ name: kw_trig
+ metadata:
+ podSelector: pod=workload-test
+ value: '1'
+`
+
+ soBothTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObject}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ advanced:
+ horizontalPodAutoscalerConfig:
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 5
+ complexScalingLogic:
+ target: '2'
+ externalCalculators:
+ - name: first_avg
+ url: {{.ExternalAvgIP}}:{{.ExternalAvgPort}}
+ timeout: '10s'
+ - name: second_add
+ url: {{.ExternalAddIP}}:{{.ExternalAddPort}}
+ timeout: '10s'
+ formula: second_add + 2
+ pollingInterval: 5
+ cooldownPeriod: 5
+ minReplicaCount: 0
+ maxReplicaCount: 10
+ triggers:
+ - type: metrics-api
+ name: metrics_api
+ metadata:
+ targetValue: "2"
+ url: "{{.MetricsServerEndpoint}}"
+ valueLocation: 'value'
+ method: "query"
+ authenticationRef:
+ name: {{.TriggerAuthName}}
+ - type: kubernetes-workload
+ name: kw_trig
+ metadata:
+ podSelector: pod=workload-test
+ value: '1'
+`
+
+ soFallbackTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObject}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ advanced:
+ horizontalPodAutoscalerConfig:
+ behavior:
+ scaleDown:
+ stabilizationWindowSeconds: 5
+ complexScalingLogic:
+ target: '2'
+ externalCalculators:
+ - name: first_avg
+ url: {{.ExternalAvgIP}}:{{.ExternalAvgPort}}
+ timeout: '10s'
+ - name: second_add
+ url: {{.ExternalAddIP}}:{{.ExternalAddPort}}
+ timeout: '10s'
+ pollingInterval: 5
+ cooldownPeriod: 5
+ minReplicaCount: 0
+ maxReplicaCount: 10
+ fallback:
+ replicas: 3
+ failureThreshold: 1
+ triggers:
+ - type: metrics-api
+ name: metrics_api
+ metadata:
+ targetValue: "2"
+ url: "{{.MetricsServerEndpoint}}"
+ valueLocation: 'value'
+ method: "query"
+ authenticationRef:
+ name: {{.TriggerAuthName}}
+ - type: kubernetes-workload
+ name: kw_trig
+ metadata:
+ podSelector: pod=workload-test
+ value: '1'
+`
+
+ workloadDeploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: depl-workload-base
+ namespace: {{.TestNamespace}}
+ labels:
+ deploy: workload-test
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ pod: workload-test
+ template:
+ metadata:
+ labels:
+ pod: workload-test
+ spec:
+ containers:
+ - name: nginx
+ image: 'nginxinc/nginx-unprivileged'`
+
+ updateMetricsTemplate = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: update-ms-value
+ namespace: {{.TestNamespace}}
+spec:
+ ttlSecondsAfterFinished: 0
+ backoffLimit: 4
+ template:
+ spec:
+ containers:
+ - name: job-curl
+ image: curlimages/curl
+ imagePullPolicy: Always
+ command: ["curl", "-X", "POST", "{{.MetricsServerEndpoint}}/{{.MetricValue}}"]
+ restartPolicy: OnFailure
+`
+
+	// image contains a Python gRPC server that computes the average of the given metrics
+ podExternalAvgTemplate = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.ServerAvgName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.ServerAvgName}}
+spec:
+ containers:
+ - name: server-avg-container
+ image: docker.io/4141gauron3268/python-proto-server-avg
+`
+
+	// image contains a Python gRPC server that adds 2 to the metric value
+ podExternalAddTemplate = `
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{.ServerAddName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.ServerAddName}}
+spec:
+ containers:
+ - name: server-add-container
+ image: docker.io/4141gauron3268/python-proto-server-add
+`
+
+ serviceTemplate = `
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{.ServiceName}}
+ namespace: {{.TestNamespace}}
+spec:
+ selector:
+ app: {{.MetricsServerDeploymentName}}
+ ports:
+ - port: 8080
+ targetPort: 8080
+`
+
+ serviceExternalAvgTemplate = `
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{.ServiceExternalAvgName}}
+ namespace: {{.TestNamespace}}
+spec:
+ selector:
+ app: {{.ServerAvgName}}
+ ports:
+ - port: {{.ExternalAvgPort}}
+ targetPort: {{.ExternalAvgPort}}
+`
+
+ serviceExternalAddTemplate = `
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{.ServiceExternalAddName}}
+ namespace: {{.TestNamespace}}
+spec:
+ selector:
+ app: {{.ServerAddName}}
+ ports:
+ - port: {{.ExternalAddPort}}
+ targetPort: {{.ExternalAddPort}}
+`
+)
+
+func TestExternalScaling(t *testing.T) {
+ // setup
+ t.Log("-- setting up ---")
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData()
+ CreateKubernetesResources(t, kc, namespace, data, templates)
+
+ // check grpc server pods are running
+ assert.True(t, waitForPodsReadyInNamespace(t, kc, namespace, []string{serverAddName, serverAvgName}, 12, 10),
+ fmt.Sprintf("pods '%v' should be ready after 2 minutes", []string{serverAddName, serverAvgName}))
+
+ ADDIP, err := ExecuteCommand(fmt.Sprintf("kubectl get endpoints %s -o custom-columns=IP:.subsets[0].addresses[0].ip -n %s", serviceExternalAddName, namespace))
+ assert.NoErrorf(t, err, "cannot get service ADD - %s", err)
+
+ AVGIP, err := ExecuteCommand(fmt.Sprintf("kubectl get endpoints %s -o custom-columns=IP:.subsets[0].addresses[0].ip -n %s", serviceExternalAvgName, namespace))
+ assert.NoErrorf(t, err, "cannot get service AVG - %s", err)
+
+ data.ExternalAvgIP = strings.Split(string(AVGIP), "\n")[1]
+ data.ExternalAddIP = strings.Split(string(ADDIP), "\n")[1]
+ testTwoExternalCalculators(t, kc, data)
+ testComplexFormula(t, kc, data)
+ testFormulaAndEC(t, kc, data)
+ testFallback(t, kc, data)
+
+ templates = append(templates, Template{Name: "soFallbackTemplate", Config: soFallbackTemplate})
+ DeleteKubernetesResources(t, namespace, data, templates)
+}
+
+func testTwoExternalCalculators(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+ t.Log("--- testTwoExternalCalculators ---")
+ KubectlApplyWithTemplate(t, data, "soExternalCalculatorTwoTemplate", soExternalCalculatorTwoTemplate)
+ // metrics calculation: avg-> 3 + 3 = 6 / 2 = 3; add-> 3 + 2 = 5; target=2 ==> 3
+ data.MetricValue = 3
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+ _, err := ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=3 -n %s", namespace))
+ assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "depl-workload-base", namespace, 3, 6, 10),
+ "replica count should be %d after 1 minute", 3)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 3, 12, 10),
+ "replica count should be %d after 2 minutes", 3)
+
+ // // ------------------------------------------------------------------ // //
+
+ // metrics calculation: avg-> 11 + 5 = 16 / 2 = 8; add-> 8 + 2 = 10; target=2 ==> 5
+ data.MetricValue = 11
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+ _, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=5 -n %s", namespace))
+ assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "depl-workload-base", namespace, 5, 6, 10),
+ "replica count should be %d after 1 minute", 5)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 5, 12, 10),
+ "replica count should be %d after 2 minutes", 5)
+}
+
+func testComplexFormula(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+ t.Log("--- testComplexFormula ---")
+ // formula simply adds 2 metrics together (3+2=5; target = 2 -> 5/2 replicas should be 3)
+ data.MetricValue = 3
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+
+ KubectlApplyWithTemplate(t, data, "soFormulaTemplate", soFormulaTemplate)
+ _, err := ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=2 -n %s", namespace))
+ assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "depl-workload-base", namespace, 2, 6, 10),
+ "replica count should be %d after 1 minute", 2)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 3, 12, 10),
+ "replica count should be %d after 2 minutes", 3)
+}
+
+func testFormulaAndEC(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+ t.Log("--- testFormulaAndEC ---")
+ KubectlApplyWithTemplate(t, data, "soBothTemplate", soBothTemplate)
+
+ data.MetricValue = 4
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+
+ _, err := ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=2 -n %s", namespace))
+ assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
+
+ // first -> 4 + 2 = 6 / 2 = 3; add 3 + 2 = 5; formula 5 + 2 = 7 / 2 -> 4
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "depl-workload-base", namespace, 2, 6, 10),
+ "replica count should be %d after 1 minute", 2)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 4, 12, 10),
+ "replica count should be %d after 2 minutes", 4)
+}
+
+func testFallback(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+ t.Log("--- testFallback ---")
+ KubectlApplyWithTemplate(t, data, "soFallbackTemplate", soFallbackTemplate)
+ _, err := ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=0 -n %s", namespace))
+ assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "depl-workload-base", namespace, 0, 6, 10),
+ "replica count should be %d after 1 minute", 0)
+
+ data.MetricValue = 3
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 2, 12, 10),
+ "replica count should be %d after 2 minutes", 2)
+
+ // delete grpc server to apply ec-fallback (simulate connection issue to server)
+ t.Logf("--- delete grpc server (named %s) for fallback ---", serverAddName)
+ KubectlDeleteWithTemplate(t, data, "podExternalAddTemplate", podExternalAddTemplate)
+
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 3, 12, 10),
+ "replica count should be %d after 2 minutes", 3)
+
+ t.Log("--- trigger default fallback as well, should not change replicas ---")
+
+ _, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/%s --replicas=0 -n %s", metricsServerDeploymentName, namespace))
+ assert.NoErrorf(t, err, "cannot scale ms deployment - %s", err)
+ AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, 3, 30)
+
+ // fix all errors to restore function
+ t.Log("--- restore all errors and scale normally ---")
+
+ _, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/%s --replicas=1 -n %s", metricsServerDeploymentName, namespace))
+ assert.NoErrorf(t, err, "cannot scale ms deployment - %s", err)
+	// re-send the metric value after restoring the metrics server, otherwise the
+	// metrics-api scaler keeps failing with "error requesting metrics endpoint"
+ data.MetricValue = 3
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+ KubectlApplyWithTemplate(t, data, "podExternalAddTemplate", podExternalAddTemplate)
+ assert.True(t, waitForPodsReadyInNamespace(t, kc, namespace, []string{serverAddName}, 12, 10),
+ fmt.Sprintf("pod '%v' should be ready after 2 minutes", []string{serverAddName, serverAvgName}))
+
+ ADDIP, err := ExecuteCommand(fmt.Sprintf("kubectl get endpoints %s -o custom-columns=IP:.subsets[0].addresses[0].ip -n %s", serviceExternalAddName, namespace))
+ assert.NoErrorf(t, err, "cannot get endpoint for server ADD - %s", err)
+ data.ExternalAddIP = strings.Split(string(ADDIP), "\n")[1]
+ KubectlApplyWithTemplate(t, data, "soFallbackTemplate", soFallbackTemplate)
+ data.MetricValue = 3
+ KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+ _, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=1 -n %s", namespace))
+ assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
+ // calculation: avg 3 + 1 = 4 / 2 = 2; add 2 + 2 = 4; 4 / 2 = 2 replicas
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 2, 18, 10),
+ "replica count should be %d after 3 minutes", 2)
+}
+
+func getTemplateData() (templateData, []Template) {
+ return templateData{
+ TestNamespace: namespace,
+ DeploymentName: deploymentName,
+ MetricsServerDeploymentName: metricsServerDeploymentName,
+ ServiceName: serviceName,
+ TriggerAuthName: triggerAuthName,
+ ScaledObject: scaledObjectName,
+ SecretName: secretName,
+ MetricsServerEndpoint: metricsServerEndpoint,
+ MetricValue: 0,
+
+ ServiceExternalAvgName: serviceExternalAvgName,
+ ServiceExternalAddName: serviceExternalAddName,
+ PodExternalAvgName: podExternalAvgName,
+ PodExternalAddname: podExternalAddname,
+ ExternalAvgPort: targetAvgPort,
+ ExternalAddPort: targetAddPort,
+ ServerAvgName: serverAvgName,
+ ServerAddName: serverAddName,
+ }, []Template{
+ // basic: scaled deployment, metrics-api trigger server & authentication
+ {Name: "secretTemplate", Config: secretTemplate},
+ {Name: "metricsServerDeploymentTemplate", Config: metricsServerDeploymentTemplate},
+ {Name: "serviceTemplate", Config: serviceTemplate},
+ {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+ {Name: "deploymentTemplate", Config: deploymentTemplate},
+ // workload base
+ {Name: "workloadDeploymentTemplate", Config: workloadDeploymentTemplate},
+ // grpc server pods
+ {Name: "podExternalAvgTemplate", Config: podExternalAvgTemplate},
+ {Name: "podExternalAddTemplate", Config: podExternalAddTemplate},
+ // services for pod endpoints
+ {Name: "serviceExternalAvgTemplate", Config: serviceExternalAvgTemplate},
+ {Name: "serviceExternalAddTemplate", Config: serviceExternalAddTemplate},
+ // so
+ // {Name: "soExternalCalculatorTwoTemplate", Config: soExternalCalculatorTwoTemplate},
+ }
+}
+
+// Waits until all pods with the given names are Running, or returns false once the iterations are exhausted.
+func waitForPodsReadyInNamespace(t *testing.T, kc *kubernetes.Clientset, namespace string,
+ names []string, iterations, intervalSeconds int) bool {
+ for i := 0; i < iterations; i++ {
+ runningCount := 0
+ pods, _ := kc.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
+ namedPods := []corev1.Pod{}
+
+ // count pods by name
+ for _, pod := range pods.Items {
+ if contains(names, pod.Name) {
+ namedPods = append(namedPods, pod)
+ }
+ }
+
+ for _, readyPod := range namedPods {
+ if readyPod.Status.Phase == corev1.PodRunning {
+ runningCount++
+ } else {
+ break
+ }
+ }
+
+ t.Logf("Waiting for pods '%v' to be ready. Namespace - %s, Current - %d, Target - %d",
+ names, namespace, runningCount, len(names))
+
+ if runningCount == len(names) {
+ return true
+ }
+ time.Sleep(time.Duration(intervalSeconds) * time.Second)
+ }
+ return false
+}
+
+func contains(s []string, str string) bool {
+ for _, v := range s {
+ if v == str {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/antonmedv/expr/.gitignore b/vendor/github.com/antonmedv/expr/.gitignore
new file mode 100644
index 00000000000..b0df3eb4442
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/.gitignore
@@ -0,0 +1,8 @@
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.test
+*.out
+*.html
diff --git a/vendor/github.com/antonmedv/expr/LICENSE b/vendor/github.com/antonmedv/expr/LICENSE
new file mode 100644
index 00000000000..7d058f841cb
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Anton Medvedev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/antonmedv/expr/README.md b/vendor/github.com/antonmedv/expr/README.md
new file mode 100644
index 00000000000..242431f2ceb
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/README.md
@@ -0,0 +1,160 @@
+# Expr
+[](https://github.com/antonmedv/expr/actions/workflows/test.yml)
+[](https://goreportcard.com/report/github.com/antonmedv/expr)
+[](https://godoc.org/github.com/antonmedv/expr)
+
+
+
+**Expr** package provides an engine that can compile and evaluate expressions.
+An expression is a one-liner that returns a value (mostly, but not limited to, booleans).
+It is designed for simplicity, speed and safety.
+
+The purpose of the package is to allow users to use expressions inside configuration for more complex logic.
+It is a perfect candidate for the foundation of a _business rule engine_.
+The idea is to let configure things in a dynamic way without recompile of a program:
+
+```coffeescript
+# Get the special price if
+user.Group in ["good_customers", "collaborator"]
+
+# Promote article to the homepage when
+len(article.Comments) > 100 and article.Category not in ["misc"]
+
+# Send an alert when
+product.Stock < 15
+```
+
+## Features
+
+* Seamless integration with Go (no need to redefine types)
+* Static typing ([example](https://godoc.org/github.com/antonmedv/expr#example-Env)).
+ ```go
+ out, err := expr.Compile(`name + age`)
+ // err: invalid operation + (mismatched types string and int)
+ // | name + age
+ // | .....^
+ ```
+* User-friendly error messages.
+* Reasonable set of basic operators.
+* Builtins `all`, `none`, `any`, `one`, `filter`, `map`.
+ ```coffeescript
+ all(Tweets, {.Size <= 280})
+ ```
+* Fast ([benchmarks](https://github.com/antonmedv/golang-expression-evaluation-comparison#readme)): uses bytecode virtual machine and optimizing compiler.
+
+## Install
+
+```
+go get github.com/antonmedv/expr
+```
+
+## Documentation
+
+* See [Getting Started](https://expr.medv.io/docs/Getting-Started) page for developer documentation.
+* See [Language Definition](https://expr.medv.io/docs/Language-Definition) page to learn the syntax.
+
+## Expr Code Editor
+
+
+
+
+
+Also, I have an embeddable code editor written in JavaScript which allows editing expressions with syntax highlighting and autocomplete based on your types declaration.
+
+[Learn more →](https://antonmedv.github.io/expr/)
+
+## Examples
+
+[Play Online](https://play.golang.org/p/z7T8ytJ1T1d)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/antonmedv/expr"
+)
+
+func main() {
+ env := map[string]interface{}{
+ "greet": "Hello, %v!",
+ "names": []string{"world", "you"},
+ "sprintf": fmt.Sprintf,
+ }
+
+ code := `sprintf(greet, names[0])`
+
+ program, err := expr.Compile(code, expr.Env(env))
+ if err != nil {
+ panic(err)
+ }
+
+ output, err := expr.Run(program, env)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(output)
+}
+```
+
+[Play Online](https://play.golang.org/p/4S4brsIvU4i)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/antonmedv/expr"
+)
+
+type Tweet struct {
+ Len int
+}
+
+type Env struct {
+ Tweets []Tweet
+}
+
+func main() {
+ code := `all(Tweets, {.Len <= 240})`
+
+ program, err := expr.Compile(code, expr.Env(Env{}))
+ if err != nil {
+ panic(err)
+ }
+
+ env := Env{
+ Tweets: []Tweet{{42}, {98}, {69}},
+ }
+ output, err := expr.Run(program, env)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(output)
+}
+```
+
+## Who uses Expr?
+
+* [Aviasales](https://aviasales.ru) uses Expr as a business rule engine for our flight search engine.
+* [Wish.com](https://www.wish.com) uses Expr for decision-making rule engine in the Wish Assistant.
+* [Argo](https://argoproj.github.io) uses Expr in Argo Rollouts and Argo Workflows for Kubernetes.
+* [Crowdsec](https://crowdsec.net) uses Expr in a security automation tool.
+* [FACEIT](https://www.faceit.com) uses Expr to allow customization of its eSports matchmaking algorithm.
+* [qiniu](https://www.qiniu.com) uses Expr in trade systems.
+* [Junglee Games](https://www.jungleegames.com/) uses Expr for an in house marketing retention tool [Project Audience](https://www.linkedin.com/pulse/meet-project-audience-our-no-code-swiss-army-knife-product-bharti).
+* [OpenTelemetry](https://opentelemetry.io) uses Expr in the OpenTelemetry Collector.
+* [Philips Labs](https://github.com/philips-labs/tabia) uses Expr in Tabia, a tool for collecting insights on the characteristics of our code bases.
+* [CoreDNS](https://coredns.io) uses Expr in CoreDNS, a DNS server.
+* [Chaos Mesh](https://chaos-mesh.org) uses Expr in Chaos Mesh, a cloud-native Chaos Engineering platform.
+* [Milvus](https://milvus.io) uses Expr in Milvus, an open-source vector database.
+* [Visually.io](https://visually.io) uses Expr as a business rule engine for our personalization targeting algorithm.
+* [Akvorado](https://github.com/akvorado/akvorado) uses Expr to classify exporters and interfaces in network flows.
+
+[Add your company too](https://github.com/antonmedv/expr/edit/master/README.md)
+
+## License
+
+[MIT](https://github.com/antonmedv/expr/blob/master/LICENSE)
diff --git a/vendor/github.com/antonmedv/expr/ast/node.go b/vendor/github.com/antonmedv/expr/ast/node.go
new file mode 100644
index 00000000000..e85f853e91f
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/ast/node.go
@@ -0,0 +1,169 @@
+package ast
+
+import (
+ "reflect"
+ "regexp"
+
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/file"
+)
+
+// Node represents items of abstract syntax tree.
+type Node interface {
+ Location() file.Location
+ SetLocation(file.Location)
+ Type() reflect.Type
+ SetType(reflect.Type)
+}
+
+func Patch(node *Node, newNode Node) {
+ newNode.SetType((*node).Type())
+ newNode.SetLocation((*node).Location())
+ *node = newNode
+}
+
+type base struct {
+ loc file.Location
+ nodeType reflect.Type
+}
+
+func (n *base) Location() file.Location {
+ return n.loc
+}
+
+func (n *base) SetLocation(loc file.Location) {
+ n.loc = loc
+}
+
+func (n *base) Type() reflect.Type {
+ return n.nodeType
+}
+
+func (n *base) SetType(t reflect.Type) {
+ n.nodeType = t
+}
+
+type NilNode struct {
+ base
+}
+
+type IdentifierNode struct {
+ base
+ Value string
+ Deref bool
+ FieldIndex []int
+ Method bool // true if method, false if field
+ MethodIndex int // index of method, set only if Method is true
+}
+
+type IntegerNode struct {
+ base
+ Value int
+}
+
+type FloatNode struct {
+ base
+ Value float64
+}
+
+type BoolNode struct {
+ base
+ Value bool
+}
+
+type StringNode struct {
+ base
+ Value string
+}
+
+type ConstantNode struct {
+ base
+ Value interface{}
+}
+
+type UnaryNode struct {
+ base
+ Operator string
+ Node Node
+}
+
+type BinaryNode struct {
+ base
+ Regexp *regexp.Regexp
+ Operator string
+ Left Node
+ Right Node
+}
+
+type ChainNode struct {
+ base
+ Node Node
+}
+
+type MemberNode struct {
+ base
+ Node Node
+ Property Node
+ Name string // Name of the filed or method. Used for error reporting.
+ Optional bool
+ Deref bool
+ FieldIndex []int
+
+ // TODO: Replace with a single MethodIndex field of &int type.
+ Method bool
+ MethodIndex int
+}
+
+type SliceNode struct {
+ base
+ Node Node
+ From Node
+ To Node
+}
+
+type CallNode struct {
+ base
+ Callee Node
+ Arguments []Node
+ Typed int
+ Fast bool
+ Func *builtin.Function
+}
+
+type BuiltinNode struct {
+ base
+ Name string
+ Arguments []Node
+}
+
+type ClosureNode struct {
+ base
+ Node Node
+}
+
+type PointerNode struct {
+ base
+}
+
+type ConditionalNode struct {
+ base
+ Cond Node
+ Exp1 Node
+ Exp2 Node
+}
+
+type ArrayNode struct {
+ base
+ Nodes []Node
+}
+
+type MapNode struct {
+ base
+ Pairs []Node
+}
+
+type PairNode struct {
+ base
+ Key Node
+ Value Node
+}
diff --git a/vendor/github.com/antonmedv/expr/ast/print.go b/vendor/github.com/antonmedv/expr/ast/print.go
new file mode 100644
index 00000000000..56bc7dbe2e3
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/ast/print.go
@@ -0,0 +1,59 @@
+package ast
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+)
+
+func Dump(node Node) string {
+ return dump(reflect.ValueOf(node), "")
+}
+
+func dump(v reflect.Value, ident string) string {
+ if !v.IsValid() {
+ return "nil"
+ }
+ t := v.Type()
+ switch t.Kind() {
+ case reflect.Struct:
+ out := t.Name() + "{\n"
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if isPrivate(f.Name) {
+ continue
+ }
+ s := v.Field(i)
+ out += fmt.Sprintf("%v%v: %v,\n", ident+"\t", f.Name, dump(s, ident+"\t"))
+ }
+ return out + ident + "}"
+ case reflect.Slice:
+ if v.Len() == 0 {
+ return t.String() + "{}"
+ }
+ out := t.String() + "{\n"
+ for i := 0; i < v.Len(); i++ {
+ s := v.Index(i)
+ out += fmt.Sprintf("%v%v,", ident+"\t", dump(s, ident+"\t"))
+ if i+1 < v.Len() {
+ out += "\n"
+ }
+ }
+ return out + "\n" + ident + "}"
+ case reflect.Ptr:
+ return dump(v.Elem(), ident)
+ case reflect.Interface:
+ return dump(reflect.ValueOf(v.Interface()), ident)
+
+ case reflect.String:
+ return fmt.Sprintf("%q", v)
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+var isCapital = regexp.MustCompile("^[A-Z]")
+
+func isPrivate(s string) bool {
+ return !isCapital.Match([]byte(s))
+}
diff --git a/vendor/github.com/antonmedv/expr/ast/visitor.go b/vendor/github.com/antonmedv/expr/ast/visitor.go
new file mode 100644
index 00000000000..351e5d72b23
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/ast/visitor.go
@@ -0,0 +1,68 @@
+package ast
+
+import "fmt"
+
+// Visitor is implemented by AST inspectors/transformers. Walk calls Visit
+// once per node, after the node's children have been visited; the pointer
+// allows the visitor to replace the node in place.
+type Visitor interface {
+	Visit(node *Node)
+}
+
+// Walk traverses the AST rooted at *node depth-first in post-order:
+// children are walked first, then v.Visit is invoked on the node itself.
+// Leaf node kinds (Nil, Identifier, Integer, ...) have no children and
+// fall straight through to the Visit call.
+func Walk(node *Node, v Visitor) {
+	switch n := (*node).(type) {
+	case *NilNode:
+	case *IdentifierNode:
+	case *IntegerNode:
+	case *FloatNode:
+	case *BoolNode:
+	case *StringNode:
+	case *ConstantNode:
+	case *UnaryNode:
+		Walk(&n.Node, v)
+	case *BinaryNode:
+		Walk(&n.Left, v)
+		Walk(&n.Right, v)
+	case *ChainNode:
+		Walk(&n.Node, v)
+	case *MemberNode:
+		Walk(&n.Node, v)
+		Walk(&n.Property, v)
+	case *SliceNode:
+		Walk(&n.Node, v)
+		if n.From != nil {
+			Walk(&n.From, v)
+		}
+		if n.To != nil {
+			Walk(&n.To, v)
+		}
+	case *CallNode:
+		Walk(&n.Callee, v)
+		for i := range n.Arguments {
+			Walk(&n.Arguments[i], v)
+		}
+	case *BuiltinNode:
+		for i := range n.Arguments {
+			Walk(&n.Arguments[i], v)
+		}
+	case *ClosureNode:
+		Walk(&n.Node, v)
+	case *PointerNode:
+	case *ConditionalNode:
+		Walk(&n.Cond, v)
+		Walk(&n.Exp1, v)
+		Walk(&n.Exp2, v)
+	case *ArrayNode:
+		for i := range n.Nodes {
+			Walk(&n.Nodes[i], v)
+		}
+	case *MapNode:
+		for i := range n.Pairs {
+			Walk(&n.Pairs[i], v)
+		}
+	case *PairNode:
+		Walk(&n.Key, v)
+		Walk(&n.Value, v)
+	default:
+		// Dereference node so %T reports the concrete node type; formatting
+		// the *Node pointer itself would always print "*ast.Node".
+		panic(fmt.Sprintf("undefined node type (%T)", *node))
+	}
+
+	v.Visit(node)
+}
diff --git a/vendor/github.com/antonmedv/expr/builtin/builtin.go b/vendor/github.com/antonmedv/expr/builtin/builtin.go
new file mode 100644
index 00000000000..ad9376962ee
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/builtin/builtin.go
@@ -0,0 +1,101 @@
+package builtin
+
+import (
+ "fmt"
+ "reflect"
+)
+
+var (
+	anyType     = reflect.TypeOf(new(interface{})).Elem() // the empty interface
+	integerType = reflect.TypeOf(0)
+	floatType   = reflect.TypeOf(float64(0))
+)
+
+// Function describes a builtin callable known to the compiler and VM.
+type Function struct {
+	Name     string
+	Func     func(args ...interface{}) (interface{}, error)
+	Opcode   int
+	Types    []reflect.Type
+	// Validate type-checks the argument types at compile time and returns
+	// the builtin's result type.
+	Validate func(args []reflect.Type) (reflect.Type, error)
+}
+
+// Opcodes for the builtins; iota starts at 1 so the zero value means
+// "not a builtin".
+const (
+	Len = iota + 1
+	Abs
+	Int
+	Float
+)
+
+// Builtins maps each builtin opcode to its static description, including
+// its compile-time argument validation.
+var Builtins = map[int]*Function{
+	Len: {
+		Name:   "len",
+		Opcode: Len,
+		Validate: func(args []reflect.Type) (reflect.Type, error) {
+			if len(args) != 1 {
+				return anyType, fmt.Errorf("invalid number of arguments for len (expected 1, got %d)", len(args))
+			}
+			switch kind(args[0]) {
+			case reflect.Array, reflect.Map, reflect.Slice, reflect.String, reflect.Interface:
+				return integerType, nil
+			}
+			return anyType, fmt.Errorf("invalid argument for len (type %s)", args[0])
+		},
+	},
+	Abs: {
+		Name:   "abs",
+		Opcode: Abs,
+		Validate: func(args []reflect.Type) (reflect.Type, error) {
+			if len(args) != 1 {
+				return anyType, fmt.Errorf("invalid number of arguments for abs (expected 1, got %d)", len(args))
+			}
+			switch kind(args[0]) {
+			case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Interface:
+				return args[0], nil
+			}
+			return anyType, fmt.Errorf("invalid argument for abs (type %s)", args[0])
+		},
+	},
+	Int: {
+		Name:   "int",
+		Opcode: Int,
+		Validate: func(args []reflect.Type) (reflect.Type, error) {
+			if len(args) != 1 {
+				return anyType, fmt.Errorf("invalid number of arguments for int (expected 1, got %d)", len(args))
+			}
+			switch kind(args[0]) {
+			case reflect.Interface:
+				return integerType, nil
+			case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+				return integerType, nil
+			case reflect.String:
+				return integerType, nil
+			}
+			return anyType, fmt.Errorf("invalid argument for int (type %s)", args[0])
+		},
+	},
+	Float: {
+		Name:   "float",
+		Opcode: Float,
+		Validate: func(args []reflect.Type) (reflect.Type, error) {
+			if len(args) != 1 {
+				return anyType, fmt.Errorf("invalid number of arguments for float (expected 1, got %d)", len(args))
+			}
+			switch kind(args[0]) {
+			case reflect.Interface:
+				return floatType, nil
+			case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+				return floatType, nil
+			case reflect.String:
+				return floatType, nil
+			}
+			return anyType, fmt.Errorf("invalid argument for float (type %s)", args[0])
+		},
+	},
+}
+
+// kind returns t's reflect.Kind, treating a nil type as reflect.Invalid so
+// callers can switch on it without a nil check.
+func kind(t reflect.Type) reflect.Kind {
+	if t == nil {
+		return reflect.Invalid
+	}
+	return t.Kind()
+}
diff --git a/vendor/github.com/antonmedv/expr/checker/checker.go b/vendor/github.com/antonmedv/expr/checker/checker.go
new file mode 100644
index 00000000000..00025a33cee
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/checker/checker.go
@@ -0,0 +1,856 @@
+package checker
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/conf"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/parser"
+ "github.com/antonmedv/expr/vm"
+)
+
+// Check type-checks the parsed expression tree against config and returns
+// the statically inferred result type. If config.Expect is set, the result
+// type must additionally match the expected kind (numeric kinds accept any
+// number or the any type).
+func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) {
+	if config == nil {
+		config = conf.New(nil)
+	}
+
+	v := &visitor{
+		config:      config,
+		collections: make([]reflect.Type, 0),
+		parents:     make([]ast.Node, 0),
+	}
+
+	t, _ = v.visit(tree.Node)
+
+	if v.err != nil {
+		// Bind attaches the source snippet so the error renders with context.
+		return t, v.err.Bind(tree.Source)
+	}
+
+	if v.config.Expect != reflect.Invalid {
+		switch v.config.Expect {
+		case reflect.Int, reflect.Int64, reflect.Float64:
+			if !isNumber(t) && !isAny(t) {
+				return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t)
+			}
+		default:
+			if t != nil {
+				if t.Kind() == v.config.Expect {
+					return t, nil
+				}
+			}
+			return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t)
+		}
+	}
+
+	return t, nil
+}
+
+// visitor carries the type-checking state while walking the AST.
+type visitor struct {
+	config      *conf.Config
+	collections []reflect.Type // stack of collection types for nested closures
+	parents     []ast.Node     // ancestor stack of the node being visited
+	err         *file.Error    // first error encountered, if any
+}
+
+// info carries extra facts about a visited node: whether it resolved to a
+// method, and the builtin function definition if it named one.
+type info struct {
+	method bool
+	fn     *builtin.Function
+}
+
+// visit dispatches on the node's concrete type, records the inferred type
+// on the node itself (SetType), and maintains the parents stack.
+func (v *visitor) visit(node ast.Node) (reflect.Type, info) {
+	var t reflect.Type
+	var i info
+	v.parents = append(v.parents, node)
+	switch n := node.(type) {
+	case *ast.NilNode:
+		t, i = v.NilNode(n)
+	case *ast.IdentifierNode:
+		t, i = v.IdentifierNode(n)
+	case *ast.IntegerNode:
+		t, i = v.IntegerNode(n)
+	case *ast.FloatNode:
+		t, i = v.FloatNode(n)
+	case *ast.BoolNode:
+		t, i = v.BoolNode(n)
+	case *ast.StringNode:
+		t, i = v.StringNode(n)
+	case *ast.ConstantNode:
+		t, i = v.ConstantNode(n)
+	case *ast.UnaryNode:
+		t, i = v.UnaryNode(n)
+	case *ast.BinaryNode:
+		t, i = v.BinaryNode(n)
+	case *ast.ChainNode:
+		t, i = v.ChainNode(n)
+	case *ast.MemberNode:
+		t, i = v.MemberNode(n)
+	case *ast.SliceNode:
+		t, i = v.SliceNode(n)
+	case *ast.CallNode:
+		t, i = v.CallNode(n)
+	case *ast.BuiltinNode:
+		t, i = v.BuiltinNode(n)
+	case *ast.ClosureNode:
+		t, i = v.ClosureNode(n)
+	case *ast.PointerNode:
+		t, i = v.PointerNode(n)
+	case *ast.ConditionalNode:
+		t, i = v.ConditionalNode(n)
+	case *ast.ArrayNode:
+		t, i = v.ArrayNode(n)
+	case *ast.MapNode:
+		t, i = v.MapNode(n)
+	case *ast.PairNode:
+		t, i = v.PairNode(n)
+	default:
+		panic(fmt.Sprintf("undefined node type (%T)", node))
+	}
+	v.parents = v.parents[:len(v.parents)-1]
+	node.SetType(t)
+	return t, i
+}
+
+// error records the first type error (later ones are dropped) and returns
+// anyType so checking can continue past the failure.
+func (v *visitor) error(node ast.Node, format string, args ...interface{}) (reflect.Type, info) {
+	if v.err == nil { // show first error
+		v.err = &file.Error{
+			Location: node.Location(),
+			Message:  fmt.Sprintf(format, args...),
+		}
+	}
+	return anyType, info{} // interface represent undefined type
+}
+
+// Literal nodes have fixed, known types.
+func (v *visitor) NilNode(*ast.NilNode) (reflect.Type, info) {
+	return nilType, info{}
+}
+
+// IdentifierNode resolves a bare name against, in order: configured
+// functions, the environment's known types, and finally the strict/default
+// fallbacks.
+func (v *visitor) IdentifierNode(node *ast.IdentifierNode) (reflect.Type, info) {
+	if fn, ok := v.config.Functions[node.Value]; ok {
+		// Return anyType instead of func type as we don't know the arguments yet.
+		// The func type can be one of the fn.Types. The type will be resolved
+		// when the arguments are known in CallNode.
+		return anyType, info{fn: fn}
+	}
+	if v.config.Types == nil {
+		node.Deref = true
+	} else if t, ok := v.config.Types[node.Value]; ok {
+		if t.Ambiguous {
+			return v.error(node, "ambiguous identifier %v", node.Value)
+		}
+		d, c := deref(t.Type)
+		node.Deref = c
+		node.Method = t.Method
+		node.MethodIndex = t.MethodIndex
+		node.FieldIndex = t.FieldIndex
+		return d, info{method: t.Method}
+	}
+	if v.config.Strict {
+		return v.error(node, "unknown name %v", node.Value)
+	}
+	if v.config.DefaultType != nil {
+		return v.config.DefaultType, info{}
+	}
+	return anyType, info{}
+}
+
+func (v *visitor) IntegerNode(*ast.IntegerNode) (reflect.Type, info) {
+	return integerType, info{}
+}
+
+func (v *visitor) FloatNode(*ast.FloatNode) (reflect.Type, info) {
+	return floatType, info{}
+}
+
+func (v *visitor) BoolNode(*ast.BoolNode) (reflect.Type, info) {
+	return boolType, info{}
+}
+
+func (v *visitor) StringNode(*ast.StringNode) (reflect.Type, info) {
+	return stringType, info{}
+}
+
+func (v *visitor) ConstantNode(node *ast.ConstantNode) (reflect.Type, info) {
+	return reflect.TypeOf(node.Value), info{}
+}
+
+// UnaryNode type-checks "!"/"not" (bool) and unary "+"/"-" (numeric);
+// operands of the any type are accepted for both.
+func (v *visitor) UnaryNode(node *ast.UnaryNode) (reflect.Type, info) {
+	t, _ := v.visit(node.Node)
+
+	switch node.Operator {
+
+	case "!", "not":
+		if isBool(t) {
+			return boolType, info{}
+		}
+		if isAny(t) {
+			return boolType, info{}
+		}
+
+	case "+", "-":
+		if isNumber(t) {
+			return t, info{}
+		}
+		if isAny(t) {
+			return anyType, info{}
+		}
+
+	default:
+		return v.error(node, "unknown operator (%v)", node.Operator)
+	}
+
+	return v.error(node, `invalid operation: %v (mismatched type %v)`, node.Operator, t)
+}
+
+// BinaryNode type-checks a binary operation. Operator overloads from the
+// config take precedence; otherwise each operator accepts a fixed set of
+// operand type combinations, with the any type permitted on either side
+// (via or()). Falling out of the switch produces a mismatched-types error.
+func (v *visitor) BinaryNode(node *ast.BinaryNode) (reflect.Type, info) {
+	l, _ := v.visit(node.Left)
+	r, _ := v.visit(node.Right)
+
+	// check operator overloading
+	if fns, ok := v.config.Operators[node.Operator]; ok {
+		t, _, ok := conf.FindSuitableOperatorOverload(fns, v.config.Types, l, r)
+		if ok {
+			return t, info{}
+		}
+	}
+
+	switch node.Operator {
+	case "==", "!=":
+		if isNumber(l) && isNumber(r) {
+			return boolType, info{}
+		}
+		if l == nil || r == nil { // It is possible to compare with nil.
+			return boolType, info{}
+		}
+		if l.Kind() == r.Kind() {
+			return boolType, info{}
+		}
+		if isAny(l) || isAny(r) {
+			return boolType, info{}
+		}
+
+	case "or", "||", "and", "&&":
+		if isBool(l) && isBool(r) {
+			return boolType, info{}
+		}
+		if or(l, r, isBool) {
+			return boolType, info{}
+		}
+
+	case "<", ">", ">=", "<=":
+		if isNumber(l) && isNumber(r) {
+			return boolType, info{}
+		}
+		if isString(l) && isString(r) {
+			return boolType, info{}
+		}
+		if isTime(l) && isTime(r) {
+			return boolType, info{}
+		}
+		if or(l, r, isNumber, isString, isTime) {
+			return boolType, info{}
+		}
+
+	case "-":
+		if isNumber(l) && isNumber(r) {
+			return combined(l, r), info{}
+		}
+		// time - time yields a duration.
+		if isTime(l) && isTime(r) {
+			return durationType, info{}
+		}
+		if or(l, r, isNumber, isTime) {
+			return anyType, info{}
+		}
+
+	case "/", "*":
+		if isNumber(l) && isNumber(r) {
+			return combined(l, r), info{}
+		}
+		if or(l, r, isNumber) {
+			return anyType, info{}
+		}
+
+	case "**", "^":
+		// Exponentiation always produces a float.
+		if isNumber(l) && isNumber(r) {
+			return floatType, info{}
+		}
+		if or(l, r, isNumber) {
+			return floatType, info{}
+		}
+
+	case "%":
+		if isInteger(l) && isInteger(r) {
+			return combined(l, r), info{}
+		}
+		if or(l, r, isInteger) {
+			return anyType, info{}
+		}
+
+	case "+":
+		if isNumber(l) && isNumber(r) {
+			return combined(l, r), info{}
+		}
+		if isString(l) && isString(r) {
+			return stringType, info{}
+		}
+		// time + duration (either order) yields a time.
+		if isTime(l) && isDuration(r) {
+			return timeType, info{}
+		}
+		if isDuration(l) && isTime(r) {
+			return timeType, info{}
+		}
+		if or(l, r, isNumber, isString, isTime, isDuration) {
+			return anyType, info{}
+		}
+
+	case "in":
+		if (isString(l) || isAny(l)) && isStruct(r) {
+			return boolType, info{}
+		}
+		if isMap(r) {
+			return boolType, info{}
+		}
+		if isArray(r) {
+			return boolType, info{}
+		}
+		if isAny(l) && anyOf(r, isString, isArray, isMap) {
+			return boolType, info{}
+		}
+		if isAny(r) {
+			return boolType, info{}
+		}
+
+	case "matches":
+		// A constant pattern is compiled once here and cached on the node,
+		// also surfacing invalid regexes at compile time.
+		if s, ok := node.Right.(*ast.StringNode); ok {
+			r, err := regexp.Compile(s.Value)
+			if err != nil {
+				return v.error(node, err.Error())
+			}
+			node.Regexp = r
+		}
+		if isString(l) && isString(r) {
+			return boolType, info{}
+		}
+		if or(l, r, isString) {
+			return boolType, info{}
+		}
+
+	case "contains", "startsWith", "endsWith":
+		if isString(l) && isString(r) {
+			return boolType, info{}
+		}
+		if or(l, r, isString) {
+			return boolType, info{}
+		}
+
+	case "..":
+		// Range operator produces a slice of integers.
+		ret := reflect.SliceOf(integerType)
+		if isInteger(l) && isInteger(r) {
+			return ret, info{}
+		}
+		if or(l, r, isInteger) {
+			return ret, info{}
+		}
+
+	case "??":
+		// Coalescing: prefer the non-nil side's type; if both are typed,
+		// keep the left type when the right is assignable to it.
+		if l == nil && r != nil {
+			return r, info{}
+		}
+		if l != nil && r == nil {
+			return l, info{}
+		}
+		if l == nil && r == nil {
+			return nilType, info{}
+		}
+		if r.AssignableTo(l) {
+			return l, info{}
+		}
+		return anyType, info{}
+
+	default:
+		return v.error(node, "unknown operator (%v)", node.Operator)
+
+	}
+
+	return v.error(node, `invalid operation: %v (mismatched types %v and %v)`, node.Operator, l, r)
+}
+
+// ChainNode (optional chaining wrapper) has the type of its wrapped node.
+func (v *visitor) ChainNode(node *ast.ChainNode) (reflect.Type, info) {
+	return v.visit(node.Node)
+}
+
+// MemberNode type-checks base.property / base[property]: methods first,
+// then map/array/slice element access, then struct fields.
+func (v *visitor) MemberNode(node *ast.MemberNode) (reflect.Type, info) {
+	base, _ := v.visit(node.Node)
+	prop, _ := v.visit(node.Property)
+
+	if name, ok := node.Property.(*ast.StringNode); ok {
+		if base == nil {
+			return v.error(node, "type %v has no field %v", base, name.Value)
+		}
+		// First, check methods defined on base type itself,
+		// independent of which type it is. Without dereferencing.
+		if m, ok := base.MethodByName(name.Value); ok {
+			if base.Kind() == reflect.Interface {
+				// In case of interface type method will not have a receiver,
+				// and to prevent checker decreasing numbers of in arguments
+				// return method type as not method (second argument is false).
+
+				// Also, we can not use m.Index here, because it will be
+				// different indexes for different types which implement
+				// the same interface.
+				return m.Type, info{}
+			} else {
+				node.Method = true
+				node.MethodIndex = m.Index
+				node.Name = name.Value
+				return m.Type, info{method: true}
+			}
+		}
+	}
+
+	// NOTE(review): if base is nil and the property is not a StringNode,
+	// the Kind() call below would dereference nil — presumably earlier
+	// checks make that unreachable; confirm against the parser.
+	if base.Kind() == reflect.Ptr {
+		base = base.Elem()
+	}
+
+	switch base.Kind() {
+	case reflect.Interface:
+		node.Deref = true
+		return anyType, info{}
+
+	case reflect.Map:
+		if prop != nil && !prop.AssignableTo(base.Key()) && !isAny(prop) {
+			return v.error(node.Property, "cannot use %v to get an element from %v", prop, base)
+		}
+		t, c := deref(base.Elem())
+		node.Deref = c
+		return t, info{}
+
+	case reflect.Array, reflect.Slice:
+		if !isInteger(prop) && !isAny(prop) {
+			return v.error(node.Property, "array elements can only be selected using an integer (got %v)", prop)
+		}
+		t, c := deref(base.Elem())
+		node.Deref = c
+		return t, info{}
+
+	case reflect.Struct:
+		if name, ok := node.Property.(*ast.StringNode); ok {
+			propertyName := name.Value
+			if field, ok := fetchField(base, propertyName); ok {
+				t, c := deref(field.Type)
+				node.Deref = c
+				node.FieldIndex = field.Index
+				node.Name = propertyName
+				return t, info{}
+			}
+			// If the direct parent is a call, report a missing method
+			// rather than a missing field for a clearer message.
+			if len(v.parents) > 1 {
+				if _, ok := v.parents[len(v.parents)-2].(*ast.CallNode); ok {
+					return v.error(node, "type %v has no method %v", base, propertyName)
+				}
+			}
+			return v.error(node, "type %v has no field %v", base, propertyName)
+		}
+	}
+
+	return v.error(node, "type %v[%v] is undefined", base, prop)
+}
+
+// SliceNode type-checks node[from:to]; only strings, arrays, slices and the
+// any type can be sliced, and both bounds must be integers (or any).
+func (v *visitor) SliceNode(node *ast.SliceNode) (reflect.Type, info) {
+	t, _ := v.visit(node.Node)
+
+	switch t.Kind() {
+	case reflect.Interface:
+		// ok
+	case reflect.String, reflect.Array, reflect.Slice:
+		// ok
+	default:
+		return v.error(node, "cannot slice %v", t)
+	}
+
+	if node.From != nil {
+		from, _ := v.visit(node.From)
+		if !isInteger(from) && !isAny(from) {
+			return v.error(node.From, "non-integer slice index %v", from)
+		}
+	}
+	if node.To != nil {
+		to, _ := v.visit(node.To)
+		if !isInteger(to) && !isAny(to) {
+			return v.error(node.To, "non-integer slice index %v", to)
+		}
+	}
+	return t, info{}
+}
+
+// CallNode type-checks a call expression. Configured builtin functions are
+// validated via Validate or matched against their declared overload Types;
+// otherwise the callee must be a func (or any), which is checked against
+// the arguments and optionally flagged for the VM's fast/typed call paths.
+func (v *visitor) CallNode(node *ast.CallNode) (reflect.Type, info) {
+	fn, fnInfo := v.visit(node.Callee)
+
+	if fnInfo.fn != nil {
+		f := fnInfo.fn
+		node.Func = f
+		if f.Validate != nil {
+			args := make([]reflect.Type, len(node.Arguments))
+			for i, arg := range node.Arguments {
+				args[i], _ = v.visit(arg)
+			}
+			t, err := f.Validate(args)
+			if err != nil {
+				return v.error(node, "%v", err)
+			}
+			return t, info{}
+		}
+		if len(f.Types) == 0 {
+			t, err := v.checkFunc(f.Name, functionType, false, node)
+			if err != nil {
+				if v.err == nil {
+					v.err = err
+				}
+				return anyType, info{}
+			}
+			// No type was specified, so we assume the function returns any.
+			return t, info{}
+		}
+		// Try each declared overload; keep the last failure for reporting.
+		var lastErr *file.Error
+		for _, t := range f.Types {
+			outType, err := v.checkFunc(f.Name, t, false, node)
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			return outType, info{}
+		}
+		if lastErr != nil {
+			if v.err == nil {
+				v.err = lastErr
+			}
+			return anyType, info{}
+		}
+	}
+
+	// Derive a display name for error messages.
+	fnName := "function"
+	if identifier, ok := node.Callee.(*ast.IdentifierNode); ok {
+		fnName = identifier.Value
+	}
+	if member, ok := node.Callee.(*ast.MemberNode); ok {
+		if name, ok := member.Property.(*ast.StringNode); ok {
+			fnName = name.Value
+		}
+	}
+	switch fn.Kind() {
+	case reflect.Interface:
+		return anyType, info{}
+	case reflect.Func:
+		inputParamsCount := 1 // for functions
+		if fnInfo.method {
+			inputParamsCount = 2 // for methods
+		}
+		// TODO: Deprecate OpCallFast and move fn(...any) any to TypedFunc list.
+		// To do this we need add support for variadic arguments in OpCallTyped.
+		if !isAny(fn) &&
+			fn.IsVariadic() &&
+			fn.NumIn() == inputParamsCount &&
+			fn.NumOut() == 1 &&
+			fn.Out(0).Kind() == reflect.Interface {
+			rest := fn.In(fn.NumIn() - 1) // function has only one param for functions and two for methods
+			if rest.Kind() == reflect.Slice && rest.Elem().Kind() == reflect.Interface {
+				node.Fast = true
+			}
+		}
+
+		outType, err := v.checkFunc(fnName, fn, fnInfo.method, node)
+		if err != nil {
+			if v.err == nil {
+				v.err = err
+			}
+			return anyType, info{}
+		}
+
+		v.findTypedFunc(node, fn, fnInfo.method)
+
+		return outType, info{}
+	}
+	return v.error(node, "%v is not callable", fn)
+}
+
+// checkFunc validates a call of fn (named name for error messages) against
+// node's arguments and returns the call's result type. method indicates fn's
+// first input is a receiver that is not present in node.Arguments. Arity and
+// per-argument assignability are checked; integer literal arguments are
+// re-typed to the parameter's numeric type. Failures are returned as a
+// *file.Error anchored at the offending location.
+func (v *visitor) checkFunc(name string, fn reflect.Type, method bool, node *ast.CallNode) (reflect.Type, *file.Error) {
+	if isAny(fn) {
+		return anyType, nil
+	}
+
+	if fn.NumOut() == 0 {
+		return anyType, &file.Error{
+			Location: node.Location(),
+			Message:  fmt.Sprintf("func %v doesn't return value", name),
+		}
+	}
+	if numOut := fn.NumOut(); numOut > 2 {
+		return anyType, &file.Error{
+			Location: node.Location(),
+			Message:  fmt.Sprintf("func %v returns more than two values", name),
+		}
+	}
+
+	// If func is method on an env, first argument should be a receiver,
+	// and actual arguments less than fnNumIn by one.
+	fnNumIn := fn.NumIn()
+	if method {
+		fnNumIn--
+	}
+	// Skip first argument in case of the receiver.
+	fnInOffset := 0
+	if method {
+		fnInOffset = 1
+	}
+
+	if fn.IsVariadic() {
+		if len(node.Arguments) < fnNumIn-1 {
+			return anyType, &file.Error{
+				Location: node.Location(),
+				Message:  fmt.Sprintf("not enough arguments to call %v", name),
+			}
+		}
+	} else {
+		if len(node.Arguments) > fnNumIn {
+			return anyType, &file.Error{
+				Location: node.Location(),
+				Message:  fmt.Sprintf("too many arguments to call %v", name),
+			}
+		}
+		if len(node.Arguments) < fnNumIn {
+			return anyType, &file.Error{
+				Location: node.Location(),
+				Message:  fmt.Sprintf("not enough arguments to call %v", name),
+			}
+		}
+	}
+
+	for i, arg := range node.Arguments {
+		t, _ := v.visit(arg)
+
+		var in reflect.Type
+		if fn.IsVariadic() && i >= fnNumIn-1 {
+			// For variadic arguments fn(xs ...int), go replaces type of xs (int) with ([]int).
+			// As we compare arguments one by one, we need underling type.
+			in = fn.In(fn.NumIn() - 1).Elem()
+		} else {
+			in = fn.In(i + fnInOffset)
+		}
+
+		if isIntegerOrArithmeticOperation(arg) && (isInteger(in) || isFloat(in)) {
+			t = in
+			setTypeForIntegers(arg, t)
+		}
+
+		if t == nil {
+			continue
+		}
+
+		if !t.AssignableTo(in) && t.Kind() != reflect.Interface {
+			return anyType, &file.Error{
+				Location: arg.Location(),
+				Message:  fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name),
+			}
+		}
+	}
+
+	return fn.Out(0), nil
+}
+
+// BuiltinNode type-checks collection builtins (all/none/any/one, filter,
+// map, count). Each takes an array (or any) plus a one-in/one-out closure;
+// the collection type is pushed on v.collections so PointerNode ("#")
+// inside the closure can resolve the element type.
+// NOTE(review): the error string "closure should has one input ..." is
+// ungrammatical upstream; left as-is since callers may match on it.
+func (v *visitor) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) {
+	switch node.Name {
+	case "all", "none", "any", "one":
+		collection, _ := v.visit(node.Arguments[0])
+		if !isArray(collection) && !isAny(collection) {
+			return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+		}
+
+		v.collections = append(v.collections, collection)
+		closure, _ := v.visit(node.Arguments[1])
+		v.collections = v.collections[:len(v.collections)-1]
+
+		if isFunc(closure) &&
+			closure.NumOut() == 1 &&
+			closure.NumIn() == 1 && isAny(closure.In(0)) {
+
+			if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) {
+				return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String())
+			}
+			return boolType, info{}
+		}
+		return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+	case "filter":
+		collection, _ := v.visit(node.Arguments[0])
+		if !isArray(collection) && !isAny(collection) {
+			return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+		}
+
+		v.collections = append(v.collections, collection)
+		closure, _ := v.visit(node.Arguments[1])
+		v.collections = v.collections[:len(v.collections)-1]
+
+		if isFunc(closure) &&
+			closure.NumOut() == 1 &&
+			closure.NumIn() == 1 && isAny(closure.In(0)) {
+
+			if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) {
+				return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String())
+			}
+			// filter preserves the element type of the input collection.
+			if isAny(collection) {
+				return arrayType, info{}
+			}
+			return reflect.SliceOf(collection.Elem()), info{}
+		}
+		return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+	case "map":
+		collection, _ := v.visit(node.Arguments[0])
+		if !isArray(collection) && !isAny(collection) {
+			return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+		}
+
+		v.collections = append(v.collections, collection)
+		closure, _ := v.visit(node.Arguments[1])
+		v.collections = v.collections[:len(v.collections)-1]
+
+		if isFunc(closure) &&
+			closure.NumOut() == 1 &&
+			closure.NumIn() == 1 && isAny(closure.In(0)) {
+
+			// map produces a slice of the closure's result type.
+			return reflect.SliceOf(closure.Out(0)), info{}
+		}
+		return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+	case "count":
+		collection, _ := v.visit(node.Arguments[0])
+		if !isArray(collection) && !isAny(collection) {
+			return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+		}
+
+		v.collections = append(v.collections, collection)
+		closure, _ := v.visit(node.Arguments[1])
+		v.collections = v.collections[:len(v.collections)-1]
+
+		if isFunc(closure) &&
+			closure.NumOut() == 1 &&
+			closure.NumIn() == 1 && isAny(closure.In(0)) {
+			if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) {
+				return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String())
+			}
+
+			return integerType, info{}
+		}
+		return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+	default:
+		return v.error(node, "unknown builtin %v", node.Name)
+	}
+}
+
+// ClosureNode has type func(any) T, where T is the body's inferred type.
+func (v *visitor) ClosureNode(node *ast.ClosureNode) (reflect.Type, info) {
+	t, _ := v.visit(node.Node)
+	return reflect.FuncOf([]reflect.Type{anyType}, []reflect.Type{t}, false), info{}
+}
+
+// PointerNode ("#") has the element type of the innermost enclosing
+// collection; it is an error outside a closure.
+func (v *visitor) PointerNode(node *ast.PointerNode) (reflect.Type, info) {
+	if len(v.collections) == 0 {
+		return v.error(node, "cannot use pointer accessor outside closure")
+	}
+
+	collection := v.collections[len(v.collections)-1]
+	switch collection.Kind() {
+	case reflect.Interface:
+		return anyType, info{}
+	case reflect.Array, reflect.Slice:
+		return collection.Elem(), info{}
+	}
+	return v.error(node, "cannot use %v as array", collection)
+}
+
+// ConditionalNode requires a bool (or any) condition; the result type is
+// the branches' common type when one is assignable to the other, otherwise
+// any (nil branches defer to the other branch's type).
+func (v *visitor) ConditionalNode(node *ast.ConditionalNode) (reflect.Type, info) {
+	c, _ := v.visit(node.Cond)
+	if !isBool(c) && !isAny(c) {
+		return v.error(node.Cond, "non-bool expression (type %v) used as condition", c)
+	}
+
+	t1, _ := v.visit(node.Exp1)
+	t2, _ := v.visit(node.Exp2)
+
+	if t1 == nil && t2 != nil {
+		return t2, info{}
+	}
+	if t1 != nil && t2 == nil {
+		return t1, info{}
+	}
+	if t1 == nil && t2 == nil {
+		return nilType, info{}
+	}
+	if t1.AssignableTo(t2) {
+		return t1, info{}
+	}
+	return anyType, info{}
+}
+
+// Array and map literals are checked element-wise but always typed as
+// []interface{} / map[string]interface{}.
+func (v *visitor) ArrayNode(node *ast.ArrayNode) (reflect.Type, info) {
+	for _, node := range node.Nodes {
+		v.visit(node)
+	}
+	return arrayType, info{}
+}
+
+func (v *visitor) MapNode(node *ast.MapNode) (reflect.Type, info) {
+	for _, pair := range node.Pairs {
+		v.visit(pair)
+	}
+	return mapType, info{}
+}
+
+// PairNode itself carries no type; key and value are checked for errors.
+func (v *visitor) PairNode(node *ast.PairNode) (reflect.Type, info) {
+	v.visit(node.Key)
+	v.visit(node.Value)
+	return nilType, info{}
+}
+
+// findTypedFunc searches vm.FuncTypes for an entry whose signature matches
+// fn exactly and, when found, records its index in node.Typed so the VM can
+// take the faster OpCallTyped path (no reflection at call time).
+func (v *visitor) findTypedFunc(node *ast.CallNode, fn reflect.Type, method bool) {
+	// OpCallTyped doesn't work for functions with variadic arguments,
+	// and doesn't work for named function types, like `type MyFunc func() int`.
+	// If PkgPath() is an empty string, it's an unnamed function type.
+	if !fn.IsVariadic() && fn.PkgPath() == "" {
+		fnNumIn := fn.NumIn()
+		fnInOffset := 0
+		if method {
+			// Skip the receiver when comparing input parameters.
+			fnNumIn--
+			fnInOffset = 1
+		}
+	funcTypes:
+		for i := range vm.FuncTypes {
+			if i == 0 {
+				// Index 0 is a placeholder; 0 in node.Typed means "untyped".
+				continue
+			}
+			typed := reflect.ValueOf(vm.FuncTypes[i]).Elem().Type()
+			if typed.Kind() != reflect.Func {
+				continue
+			}
+			if typed.NumOut() != fn.NumOut() {
+				continue
+			}
+			for j := 0; j < typed.NumOut(); j++ {
+				if typed.Out(j) != fn.Out(j) {
+					continue funcTypes
+				}
+			}
+			if typed.NumIn() != fnNumIn {
+				continue
+			}
+			for j := 0; j < typed.NumIn(); j++ {
+				if typed.In(j) != fn.In(j+fnInOffset) {
+					continue funcTypes
+				}
+			}
+			node.Typed = i
+		}
+	}
+}
diff --git a/vendor/github.com/antonmedv/expr/checker/types.go b/vendor/github.com/antonmedv/expr/checker/types.go
new file mode 100644
index 00000000000..7ccd8948091
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/checker/types.go
@@ -0,0 +1,262 @@
+package checker
+
+import (
+ "reflect"
+ "time"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/conf"
+)
+
+// Canonical reflect.Types used throughout the checker.
+var (
+	nilType      = reflect.TypeOf(nil)
+	boolType     = reflect.TypeOf(true)
+	integerType  = reflect.TypeOf(0)
+	floatType    = reflect.TypeOf(float64(0))
+	stringType   = reflect.TypeOf("")
+	arrayType    = reflect.TypeOf([]interface{}{})
+	mapType      = reflect.TypeOf(map[string]interface{}{})
+	anyType      = reflect.TypeOf(new(interface{})).Elem()
+	timeType     = reflect.TypeOf(time.Time{})
+	durationType = reflect.TypeOf(time.Duration(0))
+	functionType = reflect.TypeOf(new(func(...interface{}) (interface{}, error))).Elem()
+	errorType    = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+// combined returns the numeric result type of mixing a and b: same kind
+// stays as-is, any float promotes to float64, otherwise int.
+func combined(a, b reflect.Type) reflect.Type {
+	if a.Kind() == b.Kind() {
+		return a
+	}
+	if isFloat(a) || isFloat(b) {
+		return floatType
+	}
+	return integerType
+}
+
+// anyOf reports whether any predicate in fns accepts t.
+func anyOf(t reflect.Type, fns ...func(reflect.Type) bool) bool {
+	for _, fn := range fns {
+		if fn(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// or reports whether the l/r pair is acceptable when at least one side is
+// the any type: any+any, or any on one side with the other side matching
+// one of fns.
+func or(l, r reflect.Type, fns ...func(reflect.Type) bool) bool {
+	if isAny(l) && isAny(r) {
+		return true
+	}
+	if isAny(l) && anyOf(r, fns...) {
+		return true
+	}
+	if isAny(r) && anyOf(l, fns...) {
+		return true
+	}
+	return false
+}
+
+// Type predicates. All treat a nil type as "no"; isArray/isMap/isStruct/
+// isFunc also look through one or more pointer indirections.
+
+// isAny reports whether t is an interface type (the "unknown" type).
+func isAny(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Interface:
+			return true
+		}
+	}
+	return false
+}
+
+func isInteger(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			fallthrough
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return true
+		}
+	}
+	return false
+}
+
+func isFloat(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Float32, reflect.Float64:
+			return true
+		}
+	}
+	return false
+}
+
+func isNumber(t reflect.Type) bool {
+	return isInteger(t) || isFloat(t)
+}
+
+// isTime also accepts the any type (unlike the other predicates), so
+// operands of unknown type pass time-specific operator checks.
+func isTime(t reflect.Type) bool {
+	if t != nil {
+		switch t {
+		case timeType:
+			return true
+		}
+	}
+	return isAny(t)
+}
+
+func isDuration(t reflect.Type) bool {
+	if t != nil {
+		switch t {
+		case durationType:
+			return true
+		}
+	}
+	return false
+}
+
+func isBool(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Bool:
+			return true
+		}
+	}
+	return false
+}
+
+func isString(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.String:
+			return true
+		}
+	}
+	return false
+}
+
+func isArray(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Ptr:
+			return isArray(t.Elem())
+		case reflect.Slice, reflect.Array:
+			return true
+		}
+	}
+	return false
+}
+
+func isMap(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Ptr:
+			return isMap(t.Elem())
+		case reflect.Map:
+			return true
+		}
+	}
+	return false
+}
+
+func isStruct(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Ptr:
+			return isStruct(t.Elem())
+		case reflect.Struct:
+			return true
+		}
+	}
+	return false
+}
+
+func isFunc(t reflect.Type) bool {
+	if t != nil {
+		switch t.Kind() {
+		case reflect.Ptr:
+			return isFunc(t.Elem())
+		case reflect.Func:
+			return true
+		}
+	}
+	return false
+}
+
+// fetchField finds a struct field named name (per conf.FieldName, so tags
+// may rename it), searching direct fields first and then recursing into
+// anonymous embedded structs, fixing up Index to the full path.
+// Assumes t is a struct type — NumField would panic otherwise.
+func fetchField(t reflect.Type, name string) (reflect.StructField, bool) {
+	if t != nil {
+		// First check all structs fields.
+		for i := 0; i < t.NumField(); i++ {
+			field := t.Field(i)
+			// Search all fields, even embedded structs.
+			if conf.FieldName(field) == name {
+				return field, true
+			}
+		}
+
+		// Second check fields of embedded structs.
+		for i := 0; i < t.NumField(); i++ {
+			anon := t.Field(i)
+			if anon.Anonymous {
+				if field, ok := fetchField(anon.Type, name); ok {
+					field.Index = append(anon.Index, field.Index...)
+					return field, true
+				}
+			}
+		}
+	}
+	return reflect.StructField{}, false
+}
+
+// deref unwraps pointer types down to the pointee, reporting whether any
+// dereference happened. Interfaces are reported as needing deref but are
+// returned unchanged; pointers to composite types (struct/map/array/slice)
+// are kept as pointers.
+func deref(t reflect.Type) (reflect.Type, bool) {
+	if t == nil {
+		return nil, false
+	}
+	if t.Kind() == reflect.Interface {
+		return t, true
+	}
+	found := false
+	for t != nil && t.Kind() == reflect.Ptr {
+		e := t.Elem()
+		switch e.Kind() {
+		case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice:
+			return t, false
+		default:
+			found = true
+			t = e
+		}
+	}
+	return t, found
+}
+
+// isIntegerOrArithmeticOperation reports whether node is an integer literal
+// or an arithmetic expression built from them, i.e. a node whose numeric
+// type may safely be re-typed to a call parameter's type.
+func isIntegerOrArithmeticOperation(node ast.Node) bool {
+	switch n := node.(type) {
+	case *ast.IntegerNode:
+		return true
+	case *ast.UnaryNode:
+		switch n.Operator {
+		case "+", "-":
+			return true
+		}
+	case *ast.BinaryNode:
+		switch n.Operator {
+		case "+", "/", "-", "*":
+			return true
+		}
+	}
+	return false
+}
+
+// setTypeForIntegers recursively stamps type t onto integer literals inside
+// arithmetic subtrees (companion to isIntegerOrArithmeticOperation).
+func setTypeForIntegers(node ast.Node, t reflect.Type) {
+	switch n := node.(type) {
+	case *ast.IntegerNode:
+		n.SetType(t)
+	case *ast.UnaryNode:
+		switch n.Operator {
+		case "+", "-":
+			setTypeForIntegers(n.Node, t)
+		}
+	case *ast.BinaryNode:
+		switch n.Operator {
+		case "+", "/", "-", "*":
+			setTypeForIntegers(n.Left, t)
+			setTypeForIntegers(n.Right, t)
+		}
+	}
+}
diff --git a/vendor/github.com/antonmedv/expr/compiler/compiler.go b/vendor/github.com/antonmedv/expr/compiler/compiler.go
new file mode 100644
index 00000000000..3cd32af0f27
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/compiler/compiler.go
@@ -0,0 +1,739 @@
+package compiler
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/conf"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/parser"
+ . "github.com/antonmedv/expr/vm"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+const (
+ placeholder = 12345
+)
+
+func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("%v", r)
+ }
+ }()
+
+ c := &compiler{
+ locations: make([]file.Location, 0),
+ constantsIndex: make(map[interface{}]int),
+ functionsIndex: make(map[string]int),
+ }
+
+ if config != nil {
+ c.mapEnv = config.MapEnv
+ c.cast = config.Expect
+ }
+
+ c.compile(tree.Node)
+
+ switch c.cast {
+ case reflect.Int:
+ c.emit(OpCast, 0)
+ case reflect.Int64:
+ c.emit(OpCast, 1)
+ case reflect.Float64:
+ c.emit(OpCast, 2)
+ }
+
+ program = &Program{
+ Node: tree.Node,
+ Source: tree.Source,
+ Locations: c.locations,
+ Constants: c.constants,
+ Bytecode: c.bytecode,
+ Arguments: c.arguments,
+ Functions: c.functions,
+ }
+ return
+}
+
+type compiler struct {
+ locations []file.Location
+ bytecode []Opcode
+ constants []interface{}
+ constantsIndex map[interface{}]int
+ functions []Function
+ functionsIndex map[string]int
+ mapEnv bool
+ cast reflect.Kind
+ nodes []ast.Node
+ chains [][]int
+ arguments []int
+}
+
+func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int {
+ c.bytecode = append(c.bytecode, op)
+ current := len(c.bytecode)
+ c.arguments = append(c.arguments, arg)
+ c.locations = append(c.locations, loc)
+ return current
+}
+
+func (c *compiler) emit(op Opcode, args ...int) int {
+ arg := 0
+ if len(args) > 1 {
+ panic("too many arguments")
+ }
+ if len(args) == 1 {
+ arg = args[0]
+ }
+ var loc file.Location
+ if len(c.nodes) > 0 {
+ loc = c.nodes[len(c.nodes)-1].Location()
+ }
+ return c.emitLocation(loc, op, arg)
+}
+
+func (c *compiler) emitPush(value interface{}) int {
+ return c.emit(OpPush, c.addConstant(value))
+}
+
+func (c *compiler) addConstant(constant interface{}) int {
+ indexable := true
+ hash := constant
+ switch reflect.TypeOf(constant).Kind() {
+ case reflect.Slice, reflect.Map, reflect.Struct:
+ indexable = false
+ }
+ if field, ok := constant.(*runtime.Field); ok {
+ indexable = true
+ hash = fmt.Sprintf("%v", field)
+ }
+ if method, ok := constant.(*runtime.Method); ok {
+ indexable = true
+ hash = fmt.Sprintf("%v", method)
+ }
+ if indexable {
+ if p, ok := c.constantsIndex[hash]; ok {
+ return p
+ }
+ }
+ c.constants = append(c.constants, constant)
+ p := len(c.constants) - 1
+ if indexable {
+ c.constantsIndex[hash] = p
+ }
+ return p
+}
+
+func (c *compiler) addFunction(node *ast.CallNode) int {
+ if node.Func == nil {
+ panic("function is nil")
+ }
+ if p, ok := c.functionsIndex[node.Func.Name]; ok {
+ return p
+ }
+ p := len(c.functions)
+ c.functions = append(c.functions, node.Func.Func)
+ c.functionsIndex[node.Func.Name] = p
+ return p
+}
+
+func (c *compiler) patchJump(placeholder int) {
+ offset := len(c.bytecode) - placeholder
+ c.arguments[placeholder-1] = offset
+}
+
+func (c *compiler) calcBackwardJump(to int) int {
+ return len(c.bytecode) + 1 - to
+}
+
+func (c *compiler) compile(node ast.Node) {
+ c.nodes = append(c.nodes, node)
+ defer func() {
+ c.nodes = c.nodes[:len(c.nodes)-1]
+ }()
+
+ switch n := node.(type) {
+ case *ast.NilNode:
+ c.NilNode(n)
+ case *ast.IdentifierNode:
+ c.IdentifierNode(n)
+ case *ast.IntegerNode:
+ c.IntegerNode(n)
+ case *ast.FloatNode:
+ c.FloatNode(n)
+ case *ast.BoolNode:
+ c.BoolNode(n)
+ case *ast.StringNode:
+ c.StringNode(n)
+ case *ast.ConstantNode:
+ c.ConstantNode(n)
+ case *ast.UnaryNode:
+ c.UnaryNode(n)
+ case *ast.BinaryNode:
+ c.BinaryNode(n)
+ case *ast.ChainNode:
+ c.ChainNode(n)
+ case *ast.MemberNode:
+ c.MemberNode(n)
+ case *ast.SliceNode:
+ c.SliceNode(n)
+ case *ast.CallNode:
+ c.CallNode(n)
+ case *ast.BuiltinNode:
+ c.BuiltinNode(n)
+ case *ast.ClosureNode:
+ c.ClosureNode(n)
+ case *ast.PointerNode:
+ c.PointerNode(n)
+ case *ast.ConditionalNode:
+ c.ConditionalNode(n)
+ case *ast.ArrayNode:
+ c.ArrayNode(n)
+ case *ast.MapNode:
+ c.MapNode(n)
+ case *ast.PairNode:
+ c.PairNode(n)
+ default:
+ panic(fmt.Sprintf("undefined node type (%T)", node))
+ }
+}
+
+func (c *compiler) NilNode(_ *ast.NilNode) {
+ c.emit(OpNil)
+}
+
+func (c *compiler) IdentifierNode(node *ast.IdentifierNode) {
+ if c.mapEnv {
+ c.emit(OpLoadFast, c.addConstant(node.Value))
+ } else if len(node.FieldIndex) > 0 {
+ c.emit(OpLoadField, c.addConstant(&runtime.Field{
+ Index: node.FieldIndex,
+ Path: []string{node.Value},
+ }))
+ } else if node.Method {
+ c.emit(OpLoadMethod, c.addConstant(&runtime.Method{
+ Name: node.Value,
+ Index: node.MethodIndex,
+ }))
+ } else {
+ c.emit(OpLoadConst, c.addConstant(node.Value))
+ }
+ if node.Deref {
+ c.emit(OpDeref)
+ } else if node.Type() == nil {
+ c.emit(OpDeref)
+ }
+}
+
+func (c *compiler) IntegerNode(node *ast.IntegerNode) {
+ t := node.Type()
+ if t == nil {
+ c.emitPush(node.Value)
+ return
+ }
+ switch t.Kind() {
+ case reflect.Float32:
+ c.emitPush(float32(node.Value))
+ case reflect.Float64:
+ c.emitPush(float64(node.Value))
+ case reflect.Int:
+ c.emitPush(node.Value)
+ case reflect.Int8:
+ c.emitPush(int8(node.Value))
+ case reflect.Int16:
+ c.emitPush(int16(node.Value))
+ case reflect.Int32:
+ c.emitPush(int32(node.Value))
+ case reflect.Int64:
+ c.emitPush(int64(node.Value))
+ case reflect.Uint:
+ c.emitPush(uint(node.Value))
+ case reflect.Uint8:
+ c.emitPush(uint8(node.Value))
+ case reflect.Uint16:
+ c.emitPush(uint16(node.Value))
+ case reflect.Uint32:
+ c.emitPush(uint32(node.Value))
+ case reflect.Uint64:
+ c.emitPush(uint64(node.Value))
+ default:
+ c.emitPush(node.Value)
+ }
+}
+
+func (c *compiler) FloatNode(node *ast.FloatNode) {
+ c.emitPush(node.Value)
+}
+
+func (c *compiler) BoolNode(node *ast.BoolNode) {
+ if node.Value {
+ c.emit(OpTrue)
+ } else {
+ c.emit(OpFalse)
+ }
+}
+
+func (c *compiler) StringNode(node *ast.StringNode) {
+ c.emitPush(node.Value)
+}
+
+func (c *compiler) ConstantNode(node *ast.ConstantNode) {
+ c.emitPush(node.Value)
+}
+
+func (c *compiler) UnaryNode(node *ast.UnaryNode) {
+ c.compile(node.Node)
+
+ switch node.Operator {
+
+ case "!", "not":
+ c.emit(OpNot)
+
+ case "+":
+ // Do nothing
+
+ case "-":
+ c.emit(OpNegate)
+
+ default:
+ panic(fmt.Sprintf("unknown operator (%v)", node.Operator))
+ }
+}
+
+func (c *compiler) BinaryNode(node *ast.BinaryNode) {
+ l := kind(node.Left)
+ r := kind(node.Right)
+
+ switch node.Operator {
+ case "==":
+ c.compile(node.Left)
+ c.compile(node.Right)
+
+ if l == r && l == reflect.Int {
+ c.emit(OpEqualInt)
+ } else if l == r && l == reflect.String {
+ c.emit(OpEqualString)
+ } else {
+ c.emit(OpEqual)
+ }
+
+ case "!=":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpEqual)
+ c.emit(OpNot)
+
+ case "or", "||":
+ c.compile(node.Left)
+ end := c.emit(OpJumpIfTrue, placeholder)
+ c.emit(OpPop)
+ c.compile(node.Right)
+ c.patchJump(end)
+
+ case "and", "&&":
+ c.compile(node.Left)
+ end := c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+ c.compile(node.Right)
+ c.patchJump(end)
+
+ case "<":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpLess)
+
+ case ">":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMore)
+
+ case "<=":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpLessOrEqual)
+
+ case ">=":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMoreOrEqual)
+
+ case "+":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpAdd)
+
+ case "-":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpSubtract)
+
+ case "*":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMultiply)
+
+ case "/":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpDivide)
+
+ case "%":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpModulo)
+
+ case "**", "^":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpExponent)
+
+ case "in":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpIn)
+
+ case "matches":
+ if node.Regexp != nil {
+ c.compile(node.Left)
+ c.emit(OpMatchesConst, c.addConstant(node.Regexp))
+ } else {
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMatches)
+ }
+
+ case "contains":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpContains)
+
+ case "startsWith":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpStartsWith)
+
+ case "endsWith":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpEndsWith)
+
+ case "..":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpRange)
+
+ case "??":
+ c.compile(node.Left)
+ end := c.emit(OpJumpIfNotNil, placeholder)
+ c.emit(OpPop)
+ c.compile(node.Right)
+ c.patchJump(end)
+
+ default:
+ panic(fmt.Sprintf("unknown operator (%v)", node.Operator))
+
+ }
+}
+
+func (c *compiler) ChainNode(node *ast.ChainNode) {
+ c.chains = append(c.chains, []int{})
+ c.compile(node.Node)
+	// Chain activate (got nil somewhere)
+ for _, ph := range c.chains[len(c.chains)-1] {
+ c.patchJump(ph)
+ }
+ c.chains = c.chains[:len(c.chains)-1]
+}
+
+func (c *compiler) MemberNode(node *ast.MemberNode) {
+ if node.Method {
+ c.compile(node.Node)
+ c.emit(OpMethod, c.addConstant(&runtime.Method{
+ Name: node.Name,
+ Index: node.MethodIndex,
+ }))
+ return
+ }
+ op := OpFetch
+ original := node
+ index := node.FieldIndex
+ path := []string{node.Name}
+ base := node.Node
+ if len(node.FieldIndex) > 0 {
+ op = OpFetchField
+ for !node.Optional {
+ ident, ok := base.(*ast.IdentifierNode)
+ if ok && len(ident.FieldIndex) > 0 {
+ if ident.Deref {
+ panic("IdentifierNode should not be dereferenced")
+ }
+ index = append(ident.FieldIndex, index...)
+ path = append([]string{ident.Value}, path...)
+ c.emitLocation(ident.Location(), OpLoadField, c.addConstant(
+ &runtime.Field{Index: index, Path: path},
+ ))
+ goto deref
+ }
+ member, ok := base.(*ast.MemberNode)
+ if ok && len(member.FieldIndex) > 0 {
+ if member.Deref {
+ panic("MemberNode should not be dereferenced")
+ }
+ index = append(member.FieldIndex, index...)
+ path = append([]string{member.Name}, path...)
+ node = member
+ base = member.Node
+ } else {
+ break
+ }
+ }
+ }
+
+ c.compile(base)
+ if node.Optional {
+ ph := c.emit(OpJumpIfNil, placeholder)
+ c.chains[len(c.chains)-1] = append(c.chains[len(c.chains)-1], ph)
+ }
+
+ if op == OpFetch {
+ c.compile(node.Property)
+ c.emit(OpFetch)
+ } else {
+ c.emitLocation(node.Location(), op, c.addConstant(
+ &runtime.Field{Index: index, Path: path},
+ ))
+ }
+
+deref:
+ if original.Deref {
+ c.emit(OpDeref)
+ } else if original.Type() == nil {
+ c.emit(OpDeref)
+ }
+}
+
+func (c *compiler) SliceNode(node *ast.SliceNode) {
+ c.compile(node.Node)
+ if node.To != nil {
+ c.compile(node.To)
+ } else {
+ c.emit(OpLen)
+ }
+ if node.From != nil {
+ c.compile(node.From)
+ } else {
+ c.emitPush(0)
+ }
+ c.emit(OpSlice)
+}
+
+func (c *compiler) CallNode(node *ast.CallNode) {
+ for _, arg := range node.Arguments {
+ c.compile(arg)
+ }
+ if node.Func != nil {
+ if node.Func.Opcode > 0 {
+ c.emit(OpBuiltin, node.Func.Opcode)
+ return
+ }
+ switch len(node.Arguments) {
+ case 0:
+ c.emit(OpCall0, c.addFunction(node))
+ case 1:
+ c.emit(OpCall1, c.addFunction(node))
+ case 2:
+ c.emit(OpCall2, c.addFunction(node))
+ case 3:
+ c.emit(OpCall3, c.addFunction(node))
+ default:
+ c.emit(OpLoadFunc, c.addFunction(node))
+ c.emit(OpCallN, len(node.Arguments))
+ }
+ return
+ }
+ c.compile(node.Callee)
+ if node.Typed > 0 {
+ c.emit(OpCallTyped, node.Typed)
+ return
+ } else if node.Fast {
+ c.emit(OpCallFast, len(node.Arguments))
+ } else {
+ c.emit(OpCall, len(node.Arguments))
+ }
+}
+
+func (c *compiler) BuiltinNode(node *ast.BuiltinNode) {
+ switch node.Name {
+ case "all":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ var loopBreak int
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ loopBreak = c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+ })
+ c.emit(OpTrue)
+ c.patchJump(loopBreak)
+ c.emit(OpEnd)
+
+ case "none":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ var loopBreak int
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emit(OpNot)
+ loopBreak = c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+ })
+ c.emit(OpTrue)
+ c.patchJump(loopBreak)
+ c.emit(OpEnd)
+
+ case "any":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ var loopBreak int
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ loopBreak = c.emit(OpJumpIfTrue, placeholder)
+ c.emit(OpPop)
+ })
+ c.emit(OpFalse)
+ c.patchJump(loopBreak)
+ c.emit(OpEnd)
+
+ case "one":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emitCond(func() {
+ c.emit(OpIncrementCount)
+ })
+ })
+ c.emit(OpGetCount)
+ c.emitPush(1)
+ c.emit(OpEqual)
+ c.emit(OpEnd)
+
+ case "filter":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emitCond(func() {
+ c.emit(OpIncrementCount)
+ c.emit(OpPointer)
+ })
+ })
+ c.emit(OpGetCount)
+ c.emit(OpEnd)
+ c.emit(OpArray)
+
+ case "map":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ })
+ c.emit(OpGetLen)
+ c.emit(OpEnd)
+ c.emit(OpArray)
+
+ case "count":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emitCond(func() {
+ c.emit(OpIncrementCount)
+ })
+ })
+ c.emit(OpGetCount)
+ c.emit(OpEnd)
+
+ default:
+ panic(fmt.Sprintf("unknown builtin %v", node.Name))
+ }
+}
+
+func (c *compiler) emitCond(body func()) {
+ noop := c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+
+ body()
+
+ jmp := c.emit(OpJump, placeholder)
+ c.patchJump(noop)
+ c.emit(OpPop)
+ c.patchJump(jmp)
+}
+
+func (c *compiler) emitLoop(body func()) {
+ begin := len(c.bytecode)
+ end := c.emit(OpJumpIfEnd, placeholder)
+
+ body()
+
+ c.emit(OpIncrementIt)
+ c.emit(OpJumpBackward, c.calcBackwardJump(begin))
+ c.patchJump(end)
+}
+
+func (c *compiler) ClosureNode(node *ast.ClosureNode) {
+ c.compile(node.Node)
+}
+
+func (c *compiler) PointerNode(node *ast.PointerNode) {
+ c.emit(OpPointer)
+}
+
+func (c *compiler) ConditionalNode(node *ast.ConditionalNode) {
+ c.compile(node.Cond)
+ otherwise := c.emit(OpJumpIfFalse, placeholder)
+
+ c.emit(OpPop)
+ c.compile(node.Exp1)
+ end := c.emit(OpJump, placeholder)
+
+ c.patchJump(otherwise)
+ c.emit(OpPop)
+ c.compile(node.Exp2)
+
+ c.patchJump(end)
+}
+
+func (c *compiler) ArrayNode(node *ast.ArrayNode) {
+ for _, node := range node.Nodes {
+ c.compile(node)
+ }
+
+ c.emitPush(len(node.Nodes))
+ c.emit(OpArray)
+}
+
+func (c *compiler) MapNode(node *ast.MapNode) {
+ for _, pair := range node.Pairs {
+ c.compile(pair)
+ }
+
+ c.emitPush(len(node.Pairs))
+ c.emit(OpMap)
+}
+
+func (c *compiler) PairNode(node *ast.PairNode) {
+ c.compile(node.Key)
+ c.compile(node.Value)
+}
+
+func kind(node ast.Node) reflect.Kind {
+ t := node.Type()
+ if t == nil {
+ return reflect.Invalid
+ }
+ return t.Kind()
+}
diff --git a/vendor/github.com/antonmedv/expr/conf/config.go b/vendor/github.com/antonmedv/expr/conf/config.go
new file mode 100644
index 00000000000..1ac0fa7d291
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/config.go
@@ -0,0 +1,96 @@
+package conf
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+type Config struct {
+ Env interface{}
+ Types TypesTable
+ MapEnv bool
+ DefaultType reflect.Type
+ Operators OperatorsTable
+ Expect reflect.Kind
+ Optimize bool
+ Strict bool
+ ConstFns map[string]reflect.Value
+ Visitors []ast.Visitor
+ Functions map[string]*builtin.Function
+}
+
+// CreateNew creates new config with default values.
+func CreateNew() *Config {
+ c := &Config{
+ Operators: make(map[string][]string),
+ ConstFns: make(map[string]reflect.Value),
+ Functions: make(map[string]*builtin.Function),
+ Optimize: true,
+ }
+ for _, f := range builtin.Builtins {
+ c.Functions[f.Name] = f
+ }
+ return c
+}
+
+// New creates new config with environment.
+func New(env interface{}) *Config {
+ c := CreateNew()
+ c.WithEnv(env)
+ return c
+}
+
+func (c *Config) WithEnv(env interface{}) {
+ var mapEnv bool
+ var mapValueType reflect.Type
+ if _, ok := env.(map[string]interface{}); ok {
+ mapEnv = true
+ } else {
+ if reflect.ValueOf(env).Kind() == reflect.Map {
+ mapValueType = reflect.TypeOf(env).Elem()
+ }
+ }
+
+ c.Env = env
+ c.Types = CreateTypesTable(env)
+ c.MapEnv = mapEnv
+ c.DefaultType = mapValueType
+ c.Strict = true
+}
+
+func (c *Config) Operator(operator string, fns ...string) {
+ c.Operators[operator] = append(c.Operators[operator], fns...)
+}
+
+func (c *Config) ConstExpr(name string) {
+ if c.Env == nil {
+ panic("no environment is specified for ConstExpr()")
+ }
+ fn := reflect.ValueOf(runtime.Fetch(c.Env, name))
+ if fn.Kind() != reflect.Func {
+ panic(fmt.Errorf("const expression %q must be a function", name))
+ }
+ c.ConstFns[name] = fn
+}
+
+func (c *Config) Check() {
+ for operator, fns := range c.Operators {
+ for _, fn := range fns {
+ fnType, ok := c.Types[fn]
+ if !ok || fnType.Type.Kind() != reflect.Func {
+ panic(fmt.Errorf("function %s for %s operator does not exist in the environment", fn, operator))
+ }
+ requiredNumIn := 2
+ if fnType.Method {
+ requiredNumIn = 3 // As first argument of method is receiver.
+ }
+ if fnType.Type.NumIn() != requiredNumIn || fnType.Type.NumOut() != 1 {
+ panic(fmt.Errorf("function %s for %s operator does not have a correct signature", fn, operator))
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/conf/functions.go b/vendor/github.com/antonmedv/expr/conf/functions.go
new file mode 100644
index 00000000000..8f52a955753
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/functions.go
@@ -0,0 +1 @@
+package conf
diff --git a/vendor/github.com/antonmedv/expr/conf/operators.go b/vendor/github.com/antonmedv/expr/conf/operators.go
new file mode 100644
index 00000000000..13e069d76ca
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/operators.go
@@ -0,0 +1,59 @@
+package conf
+
+import (
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+)
+
+// OperatorsTable maps binary operators to corresponding list of functions.
+// Functions should be provided in the environment to allow operator overloading.
+type OperatorsTable map[string][]string
+
+func FindSuitableOperatorOverload(fns []string, types TypesTable, l, r reflect.Type) (reflect.Type, string, bool) {
+ for _, fn := range fns {
+ fnType := types[fn]
+ firstInIndex := 0
+ if fnType.Method {
+ firstInIndex = 1 // As first argument to method is receiver.
+ }
+ firstArgType := fnType.Type.In(firstInIndex)
+ secondArgType := fnType.Type.In(firstInIndex + 1)
+
+ firstArgumentFit := l == firstArgType || (firstArgType.Kind() == reflect.Interface && (l == nil || l.Implements(firstArgType)))
+ secondArgumentFit := r == secondArgType || (secondArgType.Kind() == reflect.Interface && (r == nil || r.Implements(secondArgType)))
+ if firstArgumentFit && secondArgumentFit {
+ return fnType.Type.Out(0), fn, true
+ }
+ }
+ return nil, "", false
+}
+
+type OperatorPatcher struct {
+ Operators OperatorsTable
+ Types TypesTable
+}
+
+func (p *OperatorPatcher) Visit(node *ast.Node) {
+ binaryNode, ok := (*node).(*ast.BinaryNode)
+ if !ok {
+ return
+ }
+
+ fns, ok := p.Operators[binaryNode.Operator]
+ if !ok {
+ return
+ }
+
+ leftType := binaryNode.Left.Type()
+ rightType := binaryNode.Right.Type()
+
+ _, fn, ok := FindSuitableOperatorOverload(fns, p.Types, leftType, rightType)
+ if ok {
+ newNode := &ast.CallNode{
+ Callee: &ast.IdentifierNode{Value: fn},
+ Arguments: []ast.Node{binaryNode.Left, binaryNode.Right},
+ }
+ ast.Patch(node, newNode)
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/conf/types_table.go b/vendor/github.com/antonmedv/expr/conf/types_table.go
new file mode 100644
index 00000000000..e917f5fa844
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/types_table.go
@@ -0,0 +1,123 @@
+package conf
+
+import (
+ "reflect"
+)
+
+type Tag struct {
+ Type reflect.Type
+ Ambiguous bool
+ FieldIndex []int
+ Method bool
+ MethodIndex int
+}
+
+type TypesTable map[string]Tag
+
+// CreateTypesTable creates types table for type checks during parsing.
+// If struct is passed, all fields will be treated as variables,
+// as well as all fields of embedded structs and struct itself.
+//
+// If map is passed, all items will be treated as variables
+// (key as name, value as type).
+func CreateTypesTable(i interface{}) TypesTable {
+ if i == nil {
+ return nil
+ }
+
+ types := make(TypesTable)
+ v := reflect.ValueOf(i)
+ t := reflect.TypeOf(i)
+
+ d := t
+ if t.Kind() == reflect.Ptr {
+ d = t.Elem()
+ }
+
+ switch d.Kind() {
+ case reflect.Struct:
+ types = FieldsFromStruct(d)
+
+	// Methods of struct should be gathered from original struct with pointer,
+	// as methods may be declared on a pointer receiver. Also this method retrieves
+	// all embedded structs' methods as well, so no need for recursion.
+ for i := 0; i < t.NumMethod(); i++ {
+ m := t.Method(i)
+ types[m.Name] = Tag{
+ Type: m.Type,
+ Method: true,
+ MethodIndex: i,
+ }
+ }
+
+ case reflect.Map:
+ for _, key := range v.MapKeys() {
+ value := v.MapIndex(key)
+ if key.Kind() == reflect.String && value.IsValid() && value.CanInterface() {
+ types[key.String()] = Tag{Type: reflect.TypeOf(value.Interface())}
+ }
+ }
+
+		// A map may have methods too.
+ for i := 0; i < t.NumMethod(); i++ {
+ m := t.Method(i)
+ types[m.Name] = Tag{
+ Type: m.Type,
+ Method: true,
+ MethodIndex: i,
+ }
+ }
+ }
+
+ return types
+}
+
+func FieldsFromStruct(t reflect.Type) TypesTable {
+ types := make(TypesTable)
+ t = dereference(t)
+ if t == nil {
+ return types
+ }
+
+ switch t.Kind() {
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if f.Anonymous {
+ for name, typ := range FieldsFromStruct(f.Type) {
+ if _, ok := types[name]; ok {
+ types[name] = Tag{Ambiguous: true}
+ } else {
+ typ.FieldIndex = append(f.Index, typ.FieldIndex...)
+ types[name] = typ
+ }
+ }
+ }
+
+ types[FieldName(f)] = Tag{
+ Type: f.Type,
+ FieldIndex: f.Index,
+ }
+ }
+ }
+
+ return types
+}
+
+func dereference(t reflect.Type) reflect.Type {
+ if t == nil {
+ return nil
+ }
+ if t.Kind() == reflect.Ptr {
+ t = dereference(t.Elem())
+ }
+ return t
+}
+
+func FieldName(field reflect.StructField) string {
+ if taggedName := field.Tag.Get("expr"); taggedName != "" {
+ return taggedName
+ }
+ return field.Name
+}
diff --git a/vendor/github.com/antonmedv/expr/expr.go b/vendor/github.com/antonmedv/expr/expr.go
new file mode 100644
index 00000000000..14f6af285c5
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/expr.go
@@ -0,0 +1,205 @@
+package expr
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/checker"
+ "github.com/antonmedv/expr/compiler"
+ "github.com/antonmedv/expr/conf"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/optimizer"
+ "github.com/antonmedv/expr/parser"
+ "github.com/antonmedv/expr/vm"
+)
+
+// Option for configuring config.
+type Option func(c *conf.Config)
+
+// Env specifies expected input of env for type checks.
+// If struct is passed, all fields will be treated as variables,
+// as well as all fields of embedded structs and struct itself.
+// If map is passed, all items will be treated as variables.
+// Methods defined on this type will be available as functions.
+func Env(env interface{}) Option {
+ return func(c *conf.Config) {
+ c.WithEnv(env)
+ }
+}
+
+// AllowUndefinedVariables allows to use undefined variables inside expressions.
+// This can be used with expr.Env option to partially define a few variables.
+func AllowUndefinedVariables() Option {
+ return func(c *conf.Config) {
+ c.Strict = false
+ }
+}
+
+// Operator allows to replace a binary operator with a function.
+func Operator(operator string, fn ...string) Option {
+ return func(c *conf.Config) {
+ c.Operator(operator, fn...)
+ }
+}
+
+// ConstExpr defines func expression as constant. If all arguments to this function are constants,
+// then it can be replaced by the result of this func call on the compile step.
+func ConstExpr(fn string) Option {
+ return func(c *conf.Config) {
+ c.ConstExpr(fn)
+ }
+}
+
+// AsKind tells the compiler to expect kind of the result.
+func AsKind(kind reflect.Kind) Option {
+ return func(c *conf.Config) {
+ c.Expect = kind
+ }
+}
+
+// AsBool tells the compiler to expect a boolean result.
+func AsBool() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Bool
+ }
+}
+
+// AsInt tells the compiler to expect an int result.
+func AsInt() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Int
+ }
+}
+
+// AsInt64 tells the compiler to expect an int64 result.
+func AsInt64() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Int64
+ }
+}
+
+// AsFloat64 tells the compiler to expect a float64 result.
+func AsFloat64() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Float64
+ }
+}
+
+// Optimize turns optimizations on or off.
+func Optimize(b bool) Option {
+ return func(c *conf.Config) {
+ c.Optimize = b
+ }
+}
+
+// Patch adds visitor to the list of visitors that will be applied before compiling AST to bytecode.
+func Patch(visitor ast.Visitor) Option {
+ return func(c *conf.Config) {
+ c.Visitors = append(c.Visitors, visitor)
+ }
+}
+
+// Function adds function to the list of functions that will be available in expressions.
+func Function(name string, fn func(params ...interface{}) (interface{}, error), types ...interface{}) Option {
+ return func(c *conf.Config) {
+ ts := make([]reflect.Type, len(types))
+ for i, t := range types {
+ t := reflect.TypeOf(t)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Func {
+ panic(fmt.Sprintf("expr: type of %s is not a function", name))
+ }
+ ts[i] = t
+ }
+ c.Functions[name] = &builtin.Function{
+ Name: name,
+ Func: fn,
+ Types: ts,
+ }
+ }
+}
+
+// Compile parses and compiles given input expression to bytecode program.
+func Compile(input string, ops ...Option) (*vm.Program, error) {
+ config := conf.CreateNew()
+
+ for _, op := range ops {
+ op(config)
+ }
+ config.Check()
+
+ if len(config.Operators) > 0 {
+ config.Visitors = append(config.Visitors, &conf.OperatorPatcher{
+ Operators: config.Operators,
+ Types: config.Types,
+ })
+ }
+
+ tree, err := parser.Parse(input)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(config.Visitors) > 0 {
+ for _, v := range config.Visitors {
+			// We need to perform a type check, because some visitors may rely on
+			// the type information available in the tree.
+ _, _ = checker.Check(tree, config)
+ ast.Walk(&tree.Node, v)
+ }
+ _, err = checker.Check(tree, config)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ _, err = checker.Check(tree, config)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if config.Optimize {
+ err = optimizer.Optimize(&tree.Node, config)
+ if err != nil {
+ if fileError, ok := err.(*file.Error); ok {
+ return nil, fileError.Bind(tree.Source)
+ }
+ return nil, err
+ }
+ }
+
+ program, err := compiler.Compile(tree, config)
+ if err != nil {
+ return nil, err
+ }
+
+ return program, nil
+}
+
+// Run evaluates given bytecode program.
+func Run(program *vm.Program, env interface{}) (interface{}, error) {
+ return vm.Run(program, env)
+}
+
+// Eval parses, compiles and runs given input.
+func Eval(input string, env interface{}) (interface{}, error) {
+ if _, ok := env.(Option); ok {
+ return nil, fmt.Errorf("misused expr.Eval: second argument (env) should be passed without expr.Env")
+ }
+
+ program, err := Compile(input)
+ if err != nil {
+ return nil, err
+ }
+
+ output, err := Run(program, env)
+ if err != nil {
+ return nil, err
+ }
+
+ return output, nil
+}
diff --git a/vendor/github.com/antonmedv/expr/file/error.go b/vendor/github.com/antonmedv/expr/file/error.go
new file mode 100644
index 00000000000..1e7e81b947b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/file/error.go
@@ -0,0 +1,69 @@
+package file
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+type Error struct {
+ Location
+ Message string
+ Snippet string
+ Prev error
+}
+
+func (e *Error) Error() string {
+ return e.format()
+}
+
+func (e *Error) Bind(source *Source) *Error {
+ if snippet, found := source.Snippet(e.Location.Line); found {
+ snippet := strings.Replace(snippet, "\t", " ", -1)
+ srcLine := "\n | " + snippet
+ var bytes = []byte(snippet)
+ var indLine = "\n | "
+ for i := 0; i < e.Location.Column && len(bytes) > 0; i++ {
+ _, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ if sz > 1 {
+ goto noind
+ } else {
+ indLine += "."
+ }
+ }
+ if _, sz := utf8.DecodeRune(bytes); sz > 1 {
+ goto noind
+ } else {
+ indLine += "^"
+ }
+ srcLine += indLine
+
+ noind:
+ e.Snippet = srcLine
+ }
+ return e
+}
+
+
+func (e *Error) Unwrap() error {
+ return e.Prev
+}
+
+func (e *Error) Wrap(err error) {
+ e.Prev = err
+}
+
+
+func (e *Error) format() string {
+ if e.Location.Empty() {
+ return e.Message
+ }
+ return fmt.Sprintf(
+ "%s (%d:%d)%s",
+ e.Message,
+ e.Line,
+ e.Column+1, // add one to the 0-based column for display
+ e.Snippet,
+ )
+}
diff --git a/vendor/github.com/antonmedv/expr/file/location.go b/vendor/github.com/antonmedv/expr/file/location.go
new file mode 100644
index 00000000000..a92e27f0b1c
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/file/location.go
@@ -0,0 +1,10 @@
+package file
+
+type Location struct {
+ Line int // The 1-based line of the location.
+ Column int // The 0-based column number of the location.
+}
+
+func (l Location) Empty() bool {
+ return l.Column == 0 && l.Line == 0
+}
diff --git a/vendor/github.com/antonmedv/expr/file/source.go b/vendor/github.com/antonmedv/expr/file/source.go
new file mode 100644
index 00000000000..9ee297b5802
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/file/source.go
@@ -0,0 +1,76 @@
+package file
+
+import (
+ "encoding/json"
+ "strings"
+ "unicode/utf8"
+)
+
+type Source struct {
+ contents []rune
+ lineOffsets []int32
+}
+
+func NewSource(contents string) *Source {
+ s := &Source{
+ contents: []rune(contents),
+ }
+ s.updateOffsets()
+ return s
+}
+
+func (s *Source) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.contents)
+}
+
+func (s *Source) UnmarshalJSON(b []byte) error {
+ contents := make([]rune, 0)
+ err := json.Unmarshal(b, &contents)
+ if err != nil {
+ return err
+ }
+
+ s.contents = contents
+ s.updateOffsets()
+ return nil
+}
+
+func (s *Source) Content() string {
+ return string(s.contents)
+}
+
+func (s *Source) Snippet(line int) (string, bool) {
+ charStart, found := s.findLineOffset(line)
+ if !found || len(s.contents) == 0 {
+ return "", false
+ }
+ charEnd, found := s.findLineOffset(line + 1)
+ if found {
+ return string(s.contents[charStart : charEnd-1]), true
+ }
+ return string(s.contents[charStart:]), true
+}
+
+// updateOffsets compute line offsets up front as they are referred to frequently.
+func (s *Source) updateOffsets() {
+ lines := strings.Split(string(s.contents), "\n")
+ offsets := make([]int32, len(lines))
+ var offset int32
+ for i, line := range lines {
+ offset = offset + int32(utf8.RuneCountInString(line)) + 1
+ offsets[int32(i)] = offset
+ }
+ s.lineOffsets = offsets
+}
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if line doesn't exist.
+func (s *Source) findLineOffset(line int) (int32, bool) {
+ if line == 1 {
+ return 0, true
+ } else if line > 1 && line <= len(s.lineOffsets) {
+ offset := s.lineOffsets[line-2]
+ return offset, true
+ }
+ return -1, false
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_expr.go b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go
new file mode 100644
index 00000000000..7ececb3dbad
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go
@@ -0,0 +1,85 @@
+package optimizer
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/file"
+)
+
+// errorType caches the reflect.Type of the error interface, used to
+// recognize a trailing error return from a constant function.
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+// constExpr folds calls to registered constant functions (fns) whose
+// arguments are all literal nodes, replacing the call with the computed
+// ConstantNode. applied reports whether any patch was made so the
+// optimizer can iterate to a fixed point; err records a compile-time
+// error (including recovered panics from the called function).
+type constExpr struct {
+ applied bool
+ err error
+ fns map[string]reflect.Value
+}
+
+// Visit folds a CallNode in place when the callee is a registered
+// constant function and every argument is a literal.
+func (c *constExpr) Visit(node *Node) {
+ defer func() {
+ if r := recover(); r != nil {
+ msg := fmt.Sprintf("%v", r)
+ // Make message more actual, it's a runtime error, but at compile step.
+ msg = strings.Replace(msg, "runtime error:", "compile error:", 1)
+ c.err = &file.Error{
+ Location: (*node).Location(),
+ Message: msg,
+ }
+ }
+ }()
+
+ patch := func(newNode Node) {
+ c.applied = true
+ Patch(node, newNode)
+ }
+
+ if call, ok := (*node).(*CallNode); ok {
+ if name, ok := call.Callee.(*IdentifierNode); ok {
+ fn, ok := c.fns[name.Value]
+ if ok {
+ in := make([]reflect.Value, len(call.Arguments))
+ for i := 0; i < len(call.Arguments); i++ {
+ arg := call.Arguments[i]
+ var param interface{}
+
+ switch a := arg.(type) {
+ case *NilNode:
+ param = nil
+ case *IntegerNode:
+ param = a.Value
+ case *FloatNode:
+ param = a.Value
+ case *BoolNode:
+ param = a.Value
+ case *StringNode:
+ param = a.Value
+ case *ConstantNode:
+ param = a.Value
+
+ default:
+ return // Const expr optimization not applicable.
+ }
+
+ if param == nil && reflect.TypeOf(param) == nil {
+ // In case of nil value and nil type use this hack,
+ // otherwise reflect.Call will panic on zero value.
+ in[i] = reflect.ValueOf(&param).Elem()
+ } else {
+ in[i] = reflect.ValueOf(param)
+ }
+ }
+
+ out := fn.Call(in)
+ value := out[0].Interface()
+ if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() {
+ c.err = out[1].Interface().(error)
+ return
+ }
+ constNode := &ConstantNode{Value: value}
+ patch(constNode)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_range.go b/vendor/github.com/antonmedv/expr/optimizer/const_range.go
new file mode 100644
index 00000000000..26d6d6f571b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/const_range.go
@@ -0,0 +1,40 @@
+package optimizer
+
+import (
+ . "github.com/antonmedv/expr/ast"
+)
+
+// constRange precomputes literal integer ranges (min..max) into a
+// ConstantNode holding the expanded []int slice.
+type constRange struct{}
+
+// Visit patches a ".." BinaryNode when both bounds are integer literals.
+func (*constRange) Visit(node *Node) {
+ switch n := (*node).(type) {
+ case *BinaryNode:
+ if n.Operator == ".." {
+ if min, ok := n.Left.(*IntegerNode); ok {
+ if max, ok := n.Right.(*IntegerNode); ok {
+ size := max.Value - min.Value + 1
+ // In case the max < min, patch empty slice
+ // as max must be greater than equal to min.
+ if size < 1 {
+ Patch(node, &ConstantNode{
+ Value: make([]int, 0),
+ })
+ return
+ }
+ // In this case array is too big. Skip generation,
+ // and wait for memory budget detection on runtime.
+ if size > 1e6 {
+ return
+ }
+ value := make([]int, size)
+ for i := range value {
+ value[i] = min.Value + i
+ }
+ Patch(node, &ConstantNode{
+ Value: value,
+ })
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/fold.go b/vendor/github.com/antonmedv/expr/optimizer/fold.go
new file mode 100644
index 00000000000..b62b2d7ed42
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/fold.go
@@ -0,0 +1,343 @@
+package optimizer
+
+import (
+ "math"
+ "reflect"
+
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/file"
+)
+
+// fold performs constant folding over the AST. applied reports whether
+// any node was patched (so the optimizer can re-run until a fixed
+// point); err records compile-time arithmetic errors such as integer
+// divide by zero.
+type fold struct {
+ applied bool
+ err *file.Error
+}
+
+// Visit folds constant unary, binary, array and builtin nodes in place.
+func (fold *fold) Visit(node *Node) {
+ patch := func(newNode Node) {
+ fold.applied = true
+ Patch(node, newNode)
+ }
+ // for IntegerNode the type may have been changed from int->float
+ // preserve this information by setting the type after the Patch
+ patchWithType := func(newNode Node, leafType reflect.Type) {
+ patch(newNode)
+ newNode.SetType(leafType)
+ }
+
+ switch n := (*node).(type) {
+ case *UnaryNode:
+ switch n.Operator {
+ case "-":
+ if i, ok := n.Node.(*IntegerNode); ok {
+ patchWithType(&IntegerNode{Value: -i.Value}, n.Node.Type())
+ }
+ if i, ok := n.Node.(*FloatNode); ok {
+ patchWithType(&FloatNode{Value: -i.Value}, n.Node.Type())
+ }
+ case "+":
+ if i, ok := n.Node.(*IntegerNode); ok {
+ patchWithType(&IntegerNode{Value: i.Value}, n.Node.Type())
+ }
+ if i, ok := n.Node.(*FloatNode); ok {
+ patchWithType(&FloatNode{Value: i.Value}, n.Node.Type())
+ }
+ case "!", "not":
+ if a := toBool(n.Node); a != nil {
+ patch(&BoolNode{Value: !a.Value})
+ }
+ }
+
+ case *BinaryNode:
+ // Each operator checks the four int/float operand combinations
+ // in its own scoped block; only the first matching pair patches.
+ switch n.Operator {
+ case "+":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&IntegerNode{Value: a.Value + b.Value}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) + b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value + float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value + b.Value}, a.Type())
+ }
+ }
+ {
+ // "+" also folds string concatenation.
+ a := toString(n.Left)
+ b := toString(n.Right)
+ if a != nil && b != nil {
+ patch(&StringNode{Value: a.Value + b.Value})
+ }
+ }
+ case "-":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&IntegerNode{Value: a.Value - b.Value}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) - b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value - float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value - b.Value}, a.Type())
+ }
+ }
+ case "*":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&IntegerNode{Value: a.Value * b.Value}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) * b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value * float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value * b.Value}, a.Type())
+ }
+ }
+ case "/":
+ // Division always folds to a float, even for two integers.
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) / float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) / b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value / float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value / b.Value}, a.Type())
+ }
+ }
+ case "%":
+ if a, ok := n.Left.(*IntegerNode); ok {
+ if b, ok := n.Right.(*IntegerNode); ok {
+ if b.Value == 0 {
+ // Report modulo by zero at compile time rather than
+ // deferring the panic to evaluation.
+ fold.err = &file.Error{
+ Location: (*node).Location(),
+ Message: "integer divide by zero",
+ }
+ return
+ }
+ patch(&IntegerNode{Value: a.Value % b.Value})
+ }
+ }
+ case "**", "^":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), float64(b.Value))}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(a.Value, float64(b.Value))}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(a.Value, b.Value)}, a.Type())
+ }
+ }
+ case "and", "&&":
+ a := toBool(n.Left)
+ b := toBool(n.Right)
+
+ if a != nil && a.Value { // true and x
+ patch(n.Right)
+ } else if b != nil && b.Value { // x and true
+ patch(n.Left)
+ } else if (a != nil && !a.Value) || (b != nil && !b.Value) { // "x and false" or "false and x"
+ patch(&BoolNode{Value: false})
+ }
+ case "or", "||":
+ a := toBool(n.Left)
+ b := toBool(n.Right)
+
+ if a != nil && !a.Value { // false or x
+ patch(n.Right)
+ } else if b != nil && !b.Value { // x or false
+ patch(n.Left)
+ } else if (a != nil && a.Value) || (b != nil && b.Value) { // "x or true" or "true or x"
+ patch(&BoolNode{Value: true})
+ }
+ case "==":
+ // Equality folds only for matching literal kinds (int, string, bool).
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patch(&BoolNode{Value: a.Value == b.Value})
+ }
+ }
+ {
+ a := toString(n.Left)
+ b := toString(n.Right)
+ if a != nil && b != nil {
+ patch(&BoolNode{Value: a.Value == b.Value})
+ }
+ }
+ {
+ a := toBool(n.Left)
+ b := toBool(n.Right)
+ if a != nil && b != nil {
+ patch(&BoolNode{Value: a.Value == b.Value})
+ }
+ }
+ }
+
+ case *ArrayNode:
+ // An array of only scalar literals collapses to one ConstantNode.
+ if len(n.Nodes) > 0 {
+ for _, a := range n.Nodes {
+ switch a.(type) {
+ case *IntegerNode, *FloatNode, *StringNode, *BoolNode:
+ continue
+ default:
+ return
+ }
+ }
+ value := make([]interface{}, len(n.Nodes))
+ for i, a := range n.Nodes {
+ switch b := a.(type) {
+ case *IntegerNode:
+ value[i] = b.Value
+ case *FloatNode:
+ value[i] = b.Value
+ case *StringNode:
+ value[i] = b.Value
+ case *BoolNode:
+ value[i] = b.Value
+ }
+ }
+ patch(&ConstantNode{Value: value})
+ }
+
+ case *BuiltinNode:
+ switch n.Name {
+ case "filter":
+ if len(n.Arguments) != 2 {
+ return
+ }
+ // Merge nested filter(filter(x, p1), p2) into filter(x, p1 && p2).
+ if base, ok := n.Arguments[0].(*BuiltinNode); ok && base.Name == "filter" {
+ patch(&BuiltinNode{
+ Name: "filter",
+ Arguments: []Node{
+ base.Arguments[0],
+ &BinaryNode{
+ Operator: "&&",
+ Left: base.Arguments[1],
+ Right: n.Arguments[1],
+ },
+ },
+ })
+ }
+ }
+ }
+}
+
+// toString returns n as *StringNode if it is one, else nil.
+func toString(n Node) *StringNode {
+ switch a := n.(type) {
+ case *StringNode:
+ return a
+ }
+ return nil
+}
+
+// toInteger returns n as *IntegerNode if it is one, else nil.
+func toInteger(n Node) *IntegerNode {
+ switch a := n.(type) {
+ case *IntegerNode:
+ return a
+ }
+ return nil
+}
+
+// toFloat returns n as *FloatNode if it is one, else nil.
+func toFloat(n Node) *FloatNode {
+ switch a := n.(type) {
+ case *FloatNode:
+ return a
+ }
+ return nil
+}
+
+// toBool returns n as *BoolNode if it is one, else nil.
+func toBool(n Node) *BoolNode {
+ switch a := n.(type) {
+ case *BoolNode:
+ return a
+ }
+ return nil
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_array.go b/vendor/github.com/antonmedv/expr/optimizer/in_array.go
new file mode 100644
index 00000000000..a51957631c0
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/in_array.go
@@ -0,0 +1,64 @@
+package optimizer
+
+import (
+ "reflect"
+
+ . "github.com/antonmedv/expr/ast"
+)
+
+// inArray rewrites `x in [literal, ...]` so the right-hand array of
+// all-int or all-string literals becomes a precomputed set (map with
+// empty struct values), giving O(1) membership checks at runtime.
+type inArray struct{}
+
+// Visit patches an "in" BinaryNode whose right side is a literal array.
+// Control falls through the `string` label when the int-set rewrite
+// does not apply.
+func (*inArray) Visit(node *Node) {
+ switch n := (*node).(type) {
+ case *BinaryNode:
+ if n.Operator == "in" {
+ if array, ok := n.Right.(*ArrayNode); ok {
+ if len(array.Nodes) > 0 {
+ t := n.Left.Type()
+ if t == nil || t.Kind() != reflect.Int {
+ // This optimization can be only performed if left side is int type,
+ // as runtime.in func uses reflect.Map.MapIndex and keys of map must,
+ // be same as checked value type.
+ goto string
+ }
+
+ for _, a := range array.Nodes {
+ if _, ok := a.(*IntegerNode); !ok {
+ goto string
+ }
+ }
+ {
+ value := make(map[int]struct{})
+ for _, a := range array.Nodes {
+ value[a.(*IntegerNode).Value] = struct{}{}
+ }
+ Patch(node, &BinaryNode{
+ Operator: n.Operator,
+ Left: n.Left,
+ Right: &ConstantNode{Value: value},
+ })
+ }
+
+ string:
+ for _, a := range array.Nodes {
+ if _, ok := a.(*StringNode); !ok {
+ return
+ }
+ }
+ {
+ value := make(map[string]struct{})
+ for _, a := range array.Nodes {
+ value[a.(*StringNode).Value] = struct{}{}
+ }
+ Patch(node, &BinaryNode{
+ Operator: n.Operator,
+ Left: n.Left,
+ Right: &ConstantNode{Value: value},
+ })
+ }
+
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_range.go b/vendor/github.com/antonmedv/expr/optimizer/in_range.go
new file mode 100644
index 00000000000..7895249e0be
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/in_range.go
@@ -0,0 +1,34 @@
+package optimizer
+
+import (
+ . "github.com/antonmedv/expr/ast"
+)
+
+// inRange rewrites `x in a..b` (with literal integer bounds) into the
+// pair of comparisons `x >= a and x <= b`, avoiding materializing the
+// range slice at runtime.
+type inRange struct{}
+
+// Visit patches an "in" BinaryNode whose right side is a literal range.
+func (*inRange) Visit(node *Node) {
+ switch n := (*node).(type) {
+ case *BinaryNode:
+ if n.Operator == "in" {
+ if rng, ok := n.Right.(*BinaryNode); ok && rng.Operator == ".." {
+ if from, ok := rng.Left.(*IntegerNode); ok {
+ if to, ok := rng.Right.(*IntegerNode); ok {
+ Patch(node, &BinaryNode{
+ Operator: "and",
+ Left: &BinaryNode{
+ Operator: ">=",
+ Left: n.Left,
+ Right: from,
+ },
+ Right: &BinaryNode{
+ Operator: "<=",
+ Left: n.Left,
+ Right: to,
+ },
+ })
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/optimizer.go b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go
new file mode 100644
index 00000000000..9c97496c8d6
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go
@@ -0,0 +1,37 @@
+package optimizer
+
+import (
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/conf"
+)
+
+// Optimize runs the optimizer passes over the AST rooted at node:
+// in-array set rewriting, iterated constant folding, constant-function
+// folding (when config provides ConstFns), range comparison rewriting,
+// and literal range expansion. It returns the first compile error hit.
+func Optimize(node *Node, config *conf.Config) error {
+ Walk(node, &inArray{})
+ // Re-run folding until no pass applies a patch; the iteration
+ // limit guards against a pathological non-terminating rewrite.
+ for limit := 1000; limit >= 0; limit-- {
+ fold := &fold{}
+ Walk(node, fold)
+ if fold.err != nil {
+ return fold.err
+ }
+ if !fold.applied {
+ break
+ }
+ }
+ if config != nil && len(config.ConstFns) > 0 {
+ for limit := 100; limit >= 0; limit-- {
+ constExpr := &constExpr{
+ fns: config.ConstFns,
+ }
+ Walk(node, constExpr)
+ if constExpr.err != nil {
+ return constExpr.err
+ }
+ if !constExpr.applied {
+ break
+ }
+ }
+ }
+ Walk(node, &inRange{})
+ Walk(node, &constRange{})
+ return nil
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go
new file mode 100644
index 00000000000..cfb1e8c61b8
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go
@@ -0,0 +1,221 @@
+package lexer
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/antonmedv/expr/file"
+)
+
+// Lex tokenizes source by running the state-function machine starting
+// at root until a state returns nil. The first lexing error, if any,
+// is bound to the source for location-aware reporting.
+func Lex(source *file.Source) ([]Token, error) {
+ l := &lexer{
+ input: source.Content(),
+ tokens: make([]Token, 0),
+ }
+
+ l.loc = file.Location{Line: 1, Column: 0}
+ l.prev = l.loc
+ l.startLoc = l.loc
+
+ for state := root; state != nil; {
+ state = state(l)
+ }
+
+ if l.err != nil {
+ return nil, l.err.Bind(source)
+ }
+
+ return l.tokens, nil
+}
+
+// lexer holds scanning state: the input string, emitted tokens, the
+// current [start, end) word span, and location bookkeeping.
+type lexer struct {
+ input string
+ tokens []Token
+ start, end int // current position in input
+ width int // last rune width
+ startLoc file.Location // start location
+ prev, loc file.Location // prev location of end location, end location
+ err *file.Error
+}
+
+// eof is the sentinel rune returned by next() past the end of input.
+const eof rune = -1
+
+// next consumes and returns the next rune, updating line/column.
+func (l *lexer) next() rune {
+ if l.end >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.end:])
+ l.width = w
+ l.end += w
+
+ l.prev = l.loc
+ if r == '\n' {
+ l.loc.Line++
+ l.loc.Column = 0
+ } else {
+ l.loc.Column++
+ }
+
+ return r
+}
+
+// peek returns the next rune without consuming it.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune; only a single step is supported.
+func (l *lexer) backup() {
+ l.end -= l.width
+ l.loc = l.prev
+}
+
+// emit appends a token of kind t whose value is the current word.
+func (l *lexer) emit(t Kind) {
+ l.emitValue(t, l.word())
+}
+
+// emitValue appends a token with an explicit value and resets the word start.
+func (l *lexer) emitValue(t Kind, value string) {
+ l.tokens = append(l.tokens, Token{
+ Location: l.startLoc,
+ Kind: t,
+ Value: value,
+ })
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+// emitEOF appends the terminating EOF token.
+func (l *lexer) emitEOF() {
+ l.tokens = append(l.tokens, Token{
+ Location: l.prev, // Point to previous position for better error messages.
+ Kind: EOF,
+ })
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+// skip discards the current word without emitting a token.
+func (l *lexer) skip() {
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+// word returns the text consumed since the last emit/skip/ignore.
+func (l *lexer) word() string {
+ return l.input[l.start:l.end]
+}
+
+// ignore discards the current word (same effect as skip).
+func (l *lexer) ignore() {
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+// accept consumes the next rune if it is in valid; otherwise backs up.
+func (l *lexer) accept(valid string) bool {
+ if strings.ContainsRune(valid, l.next()) {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from valid.
+func (l *lexer) acceptRun(valid string) {
+ for strings.ContainsRune(valid, l.next()) {
+ }
+ l.backup()
+}
+
+// skipSpaces consumes and discards a run of space characters.
+func (l *lexer) skipSpaces() {
+ r := l.peek()
+ for ; r == ' '; r = l.peek() {
+ l.next()
+ }
+ l.skip()
+}
+
+// acceptWord consumes word (after optional spaces) only if it is
+// followed by a space or EOF; otherwise position is fully restored.
+func (l *lexer) acceptWord(word string) bool {
+ pos, loc, prev := l.end, l.loc, l.prev
+
+ l.skipSpaces()
+
+ for _, ch := range word {
+ if l.next() != ch {
+ l.end, l.loc, l.prev = pos, loc, prev
+ return false
+ }
+ }
+ if r := l.peek(); r != ' ' && r != eof {
+ l.end, l.loc, l.prev = pos, loc, prev
+ return false
+ }
+
+ return true
+}
+
+// error records the first lexing error and returns nil to stop the
+// state machine.
+func (l *lexer) error(format string, args ...interface{}) stateFn {
+ if l.err == nil { // show first error
+ l.err = &file.Error{
+ Location: l.loc,
+ Message: fmt.Sprintf(format, args...),
+ }
+ }
+ return nil
+}
+
+// digitVal returns the numeric value of a hex digit, or 16 if invalid.
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= lower(ch) && lower(ch) <= 'f':
+ return int(lower(ch) - 'a' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+
+// scanDigits consumes up to n digits of the given base starting at ch,
+// recording an error if fewer than n valid digits are found.
+func (l *lexer) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = l.next()
+ n--
+ }
+ if n > 0 {
+ l.error("invalid char escape")
+ }
+ return ch
+}
+
+// scanEscape consumes one escape sequence inside a quoted literal.
+func (l *lexer) scanEscape(quote rune) rune {
+ ch := l.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ // nothing to do
+ ch = l.next()
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ ch = l.scanDigits(ch, 8, 3)
+ case 'x':
+ ch = l.scanDigits(l.next(), 16, 2)
+ case 'u':
+ ch = l.scanDigits(l.next(), 16, 4)
+ case 'U':
+ ch = l.scanDigits(l.next(), 16, 8)
+ default:
+ l.error("invalid char escape")
+ }
+ return ch
+}
+
+// scanString consumes a quoted literal up to the closing quote and
+// returns the number of characters read; errors on newline or EOF.
+func (l *lexer) scanString(quote rune) (n int) {
+ ch := l.next() // read character after quote
+ for ch != quote {
+ if ch == '\n' || ch == eof {
+ l.error("literal not terminated")
+ return
+ }
+ if ch == '\\' {
+ ch = l.scanEscape(quote)
+ } else {
+ ch = l.next()
+ }
+ n++
+ }
+ return
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/state.go b/vendor/github.com/antonmedv/expr/parser/lexer/state.go
new file mode 100644
index 00000000000..1212aa3217f
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/state.go
@@ -0,0 +1,198 @@
+package lexer
+
+import (
+ "strings"
+)
+
+// stateFn is a lexer state; each state consumes input and returns the
+// next state (nil terminates the state machine).
+type stateFn func(*lexer) stateFn
+
+// root is the top-level state: it dispatches on the next rune to
+// string, number, operator, bracket, comment or identifier handling.
+func root(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == eof:
+ l.emitEOF()
+ return nil
+ case IsSpace(r):
+ l.ignore()
+ return root
+ case r == '\'' || r == '"':
+ l.scanString(r)
+ str, err := unescape(l.word())
+ if err != nil {
+ l.error("%v", err)
+ }
+ l.emitValue(String, str)
+ case '0' <= r && r <= '9':
+ l.backup()
+ return number
+ case r == '?':
+ return questionMark
+ case r == '/':
+ return slash
+ case strings.ContainsRune("([{", r):
+ l.emit(Bracket)
+ case strings.ContainsRune(")]}", r):
+ l.emit(Bracket)
+ case strings.ContainsRune("#,:%+-^", r): // single rune operator
+ l.emit(Operator)
+ case strings.ContainsRune("&|!=*<>", r): // possible double rune operator
+ l.accept("&|=*")
+ l.emit(Operator)
+ case r == '.':
+ l.backup()
+ return dot
+ case IsAlphaNumeric(r):
+ l.backup()
+ return identifier
+ default:
+ return l.error("unrecognized character: %#U", r)
+ }
+ return root
+}
+
+// number scans a numeric literal and emits a Number token.
+func number(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.error("bad number syntax: %q", l.word())
+ }
+ l.emit(Number)
+ return root
+}
+
+// scanNumber consumes an int/float literal (hex, octal, binary,
+// decimal, exponent) and reports whether it is well-formed.
+func (l *lexer) scanNumber() bool {
+ digits := "0123456789_"
+ // Is it hex?
+ if l.accept("0") {
+ // Note: Leading 0 does not mean octal in floats.
+ if l.accept("xX") {
+ digits = "0123456789abcdefABCDEF_"
+ } else if l.accept("oO") {
+ digits = "01234567_"
+ } else if l.accept("bB") {
+ digits = "01_"
+ }
+ }
+ l.acceptRun(digits)
+ loc, prev, end := l.loc, l.prev, l.end
+ if l.accept(".") {
+ // Lookup for .. operator: if after dot there is another dot (1..2), it maybe a range operator.
+ if l.peek() == '.' {
+ // We can't backup() here, as it would require two backups,
+ // and backup() func supports only one for now. So, save and
+ // restore it here.
+ l.loc, l.prev, l.end = loc, prev, end
+ return true
+ }
+ l.acceptRun(digits)
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun(digits)
+ }
+ // Next thing mustn't be alphanumeric.
+ if IsAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
+
+// dot handles a leading '.': either a float like .5 or the '.'/'..'
+// operator.
+func dot(l *lexer) stateFn {
+ l.next()
+ if l.accept("0123456789") {
+ l.backup()
+ return number
+ }
+ l.accept(".")
+ l.emit(Operator)
+ return root
+}
+
+// identifier scans a word and emits it as an Operator (for keyword
+// operators) or an Identifier.
+func identifier(l *lexer) stateFn {
+loop:
+ for {
+ switch r := l.next(); {
+ case IsAlphaNumeric(r):
+ // absorb
+ default:
+ l.backup()
+ switch l.word() {
+ case "not":
+ return not
+ case "in", "or", "and", "matches", "contains", "startsWith", "endsWith":
+ l.emit(Operator)
+ default:
+ l.emit(Identifier)
+ }
+ break loop
+ }
+ }
+ return root
+}
+
+// not emits "not" and then checks whether the following word forms a
+// compound operator (e.g. "not in"); otherwise position is restored.
+func not(l *lexer) stateFn {
+ l.emit(Operator)
+
+ l.skipSpaces()
+
+ pos, loc, prev := l.end, l.loc, l.prev
+
+ // Get the next word.
+ for {
+ r := l.next()
+ if IsAlphaNumeric(r) {
+ // absorb
+ } else {
+ l.backup()
+ break
+ }
+ }
+
+ switch l.word() {
+ case "in", "matches", "contains", "startsWith", "endsWith":
+ l.emit(Operator)
+ default:
+ l.end, l.loc, l.prev = pos, loc, prev
+ }
+ return root
+}
+
+// questionMark emits "?", "?." or "??" as one Operator token.
+func questionMark(l *lexer) stateFn {
+ l.accept(".?")
+ l.emit(Operator)
+ return root
+}
+
+// slash distinguishes "//" and "/*" comments from the "/" operator.
+func slash(l *lexer) stateFn {
+ if l.accept("/") {
+ return singleLineComment
+ }
+ if l.accept("*") {
+ return multiLineComment
+ }
+ l.emit(Operator)
+ return root
+}
+
+// singleLineComment discards input up to the end of the line.
+func singleLineComment(l *lexer) stateFn {
+ for {
+ r := l.next()
+ if r == eof || r == '\n' {
+ break
+ }
+ }
+ l.ignore()
+ return root
+}
+
+// multiLineComment discards input up to the closing "*/", erroring on
+// EOF.
+func multiLineComment(l *lexer) stateFn {
+ for {
+ r := l.next()
+ if r == eof {
+ return l.error("unclosed comment")
+ }
+ if r == '*' && l.accept("/") {
+ break
+ }
+ }
+ l.ignore()
+ return root
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/token.go b/vendor/github.com/antonmedv/expr/parser/lexer/token.go
new file mode 100644
index 00000000000..8917b26dce6
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/token.go
@@ -0,0 +1,47 @@
+package lexer
+
+import (
+ "fmt"
+
+ "github.com/antonmedv/expr/file"
+)
+
+// Kind classifies a lexed token.
+type Kind string
+
+const (
+ Identifier Kind = "Identifier"
+ Number Kind = "Number"
+ String Kind = "String"
+ Operator Kind = "Operator"
+ Bracket Kind = "Bracket"
+ EOF Kind = "EOF"
+)
+
+// Token is a single lexeme with its source location.
+type Token struct {
+ file.Location
+ Kind Kind
+ Value string
+}
+
+// String renders the token for debugging, e.g. Operator("+").
+func (t Token) String() string {
+ if t.Value == "" {
+ return string(t.Kind)
+ }
+ return fmt.Sprintf("%s(%#v)", t.Kind, t.Value)
+}
+
+// Is reports whether the token has the given kind and, when values are
+// supplied, one of the given values.
+func (t Token) Is(kind Kind, values ...string) bool {
+ if len(values) == 0 {
+ return kind == t.Kind
+ }
+
+ for _, v := range values {
+ if v == t.Value {
+ goto found
+ }
+ }
+ return false
+
+found:
+ return kind == t.Kind
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/utils.go b/vendor/github.com/antonmedv/expr/parser/lexer/utils.go
new file mode 100644
index 00000000000..72e3cf20c97
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/utils.go
@@ -0,0 +1,194 @@
+package lexer
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// IsSpace reports whether r is a Unicode space character.
+func IsSpace(r rune) bool {
+ return unicode.IsSpace(r)
+}
+
+// IsAlphaNumeric reports whether r can appear in an identifier.
+func IsAlphaNumeric(r rune) bool {
+ return IsAlphabetic(r) || unicode.IsDigit(r)
+}
+
+// IsAlphabetic reports whether r can start an identifier ('_', '$' or
+// a Unicode letter).
+func IsAlphabetic(r rune) bool {
+ return r == '_' || r == '$' || unicode.IsLetter(r)
+}
+
+var (
+ newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
+
+// Unescape takes a quoted string, unquotes, and unescapes it.
+func unescape(value string) (string, error) {
+ // All strings normalize newlines to the \n representation.
+ value = newlineNormalizer.Replace(value)
+ n := len(value)
+
+ // Nothing to unescape / decode.
+ if n < 2 {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ // Quoted string of some form, must have same first and last char.
+ if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ value = value[1 : n-1]
+
+ // The string contains escape characters.
+ // The following logic is adapted from `strconv/quote.go`
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*n/2)
+ for len(value) > 0 {
+ c, multibyte, rest, err := unescapeChar(value)
+ if err != nil {
+ return "", err
+ }
+ value = rest
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ }
+ return string(buf), nil
+}
+
+// unescapeChar takes a string input and returns the following info:
+//
+// value - the escaped unicode rune at the front of the string.
+// multibyte - whether the rune value might require multiple bytes to represent.
+// tail - the remainder of the input string.
+// err - error value, if the character could not be unescaped.
+//
+// When multibyte is true the return value may still fit within a single byte,
+// but a multibyte conversion is attempted which is more expensive than when the
+// value is known to fit within one byte.
+func unescapeChar(s string) (value rune, multibyte bool, tail string, err error) {
+ // 1. Character is not an escape sequence.
+ switch c := s[0]; {
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // 2. Last character is the start of an escape sequence.
+ if len(s) <= 1 {
+ err = fmt.Errorf("unable to unescape string, found '\\' as last character")
+ return
+ }
+
+ c := s[1]
+ s = s[2:]
+ // 3. Common escape sequences shared with Google SQL
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case '\\':
+ value = '\\'
+ case '\'':
+ value = '\''
+ case '"':
+ value = '"'
+ case '`':
+ value = '`'
+ case '?':
+ value = '?'
+
+ // 4. Unicode escape sequences, reproduced from `strconv/quote.go`
+ case 'x', 'X', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ multibyte = true
+
+ // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+ case '0', '1', '2', '3':
+ if len(s) < 2 {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v := rune(c - '0')
+ for j := 0; j < 2; j++ {
+ x := s[j]
+ if x < '0' || x > '7' {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v = v*8 + rune(x-'0')
+ }
+ if v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ s = s[2:]
+ multibyte = true
+
+ // Unknown escape sequence.
+ default:
+ err = fmt.Errorf("unable to unescape string")
+ }
+
+ tail = s
+ return
+}
+
+// unhex converts a hex digit byte to its value, reporting validity.
+func unhex(b byte) (rune, bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return 0, false
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/parser.go b/vendor/github.com/antonmedv/expr/parser/parser.go
new file mode 100644
index 00000000000..fd26fe18bdc
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/parser.go
@@ -0,0 +1,610 @@
+package parser
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/file"
+ . "github.com/antonmedv/expr/parser/lexer"
+)
+
+type associativity int
+
+const (
+ left associativity = iota + 1
+ right
+)
+
+type operator struct {
+ precedence int
+ associativity associativity
+}
+
+type builtin struct {
+ arity int
+}
+
+var unaryOperators = map[string]operator{
+ "not": {50, left},
+ "!": {50, left},
+ "-": {90, left},
+ "+": {90, left},
+}
+
+var binaryOperators = map[string]operator{
+ "or": {10, left},
+ "||": {10, left},
+ "and": {15, left},
+ "&&": {15, left},
+ "==": {20, left},
+ "!=": {20, left},
+ "<": {20, left},
+ ">": {20, left},
+ ">=": {20, left},
+ "<=": {20, left},
+ "in": {20, left},
+ "matches": {20, left},
+ "contains": {20, left},
+ "startsWith": {20, left},
+ "endsWith": {20, left},
+ "..": {25, left},
+ "+": {30, left},
+ "-": {30, left},
+ "*": {60, left},
+ "/": {60, left},
+ "%": {60, left},
+ "**": {100, right},
+ "^": {100, right},
+ "??": {500, left},
+}
+
+var builtins = map[string]builtin{
+ "all": {2},
+ "none": {2},
+ "any": {2},
+ "one": {2},
+ "filter": {2},
+ "map": {2},
+ "count": {2},
+}
+
+type parser struct {
+ tokens []Token
+ current Token
+ pos int
+ err *file.Error
+ depth int // closure call depth
+}
+
+type Tree struct {
+ Node Node
+ Source *file.Source
+}
+
+func Parse(input string) (*Tree, error) {
+ source := file.NewSource(input)
+
+ tokens, err := Lex(source)
+ if err != nil {
+ return nil, err
+ }
+
+ p := &parser{
+ tokens: tokens,
+ current: tokens[0],
+ }
+
+ node := p.parseExpression(0)
+
+ if !p.current.Is(EOF) {
+ p.error("unexpected token %v", p.current)
+ }
+
+ if p.err != nil {
+ return nil, p.err.Bind(source)
+ }
+
+ return &Tree{
+ Node: node,
+ Source: source,
+ }, nil
+}
+
+func (p *parser) error(format string, args ...interface{}) {
+ p.errorAt(p.current, format, args...)
+}
+
+func (p *parser) errorAt(token Token, format string, args ...interface{}) {
+ if p.err == nil { // show first error
+ p.err = &file.Error{
+ Location: token.Location,
+ Message: fmt.Sprintf(format, args...),
+ }
+ }
+}
+
+func (p *parser) next() {
+ p.pos++
+ if p.pos >= len(p.tokens) {
+ p.error("unexpected end of expression")
+ return
+ }
+ p.current = p.tokens[p.pos]
+}
+
+func (p *parser) expect(kind Kind, values ...string) {
+ if p.current.Is(kind, values...) {
+ p.next()
+ return
+ }
+ p.error("unexpected token %v", p.current)
+}
+
+// parse functions
+
+func (p *parser) parseExpression(precedence int) Node {
+ nodeLeft := p.parsePrimary()
+
+ lastOperator := ""
+ opToken := p.current
+ for opToken.Is(Operator) && p.err == nil {
+ negate := false
+ var notToken Token
+
+ if opToken.Is(Operator, "not") {
+ p.next()
+ notToken = p.current
+ negate = true
+ opToken = p.current
+ }
+
+ if op, ok := binaryOperators[opToken.Value]; ok {
+ if op.precedence >= precedence {
+ p.next()
+
+ if lastOperator == "??" && opToken.Value != "??" && !opToken.Is(Bracket, "(") {
+ p.errorAt(opToken, "Operator (%v) and coalesce expressions (??) cannot be mixed. Wrap either by parentheses.", opToken.Value)
+ break
+ }
+
+ var nodeRight Node
+ if op.associativity == left {
+ nodeRight = p.parseExpression(op.precedence + 1)
+ } else {
+ nodeRight = p.parseExpression(op.precedence)
+ }
+
+ nodeLeft = &BinaryNode{
+ Operator: opToken.Value,
+ Left: nodeLeft,
+ Right: nodeRight,
+ }
+ nodeLeft.SetLocation(opToken.Location)
+
+ if negate {
+ nodeLeft = &UnaryNode{
+ Operator: "not",
+ Node: nodeLeft,
+ }
+ nodeLeft.SetLocation(notToken.Location)
+ }
+
+ lastOperator = opToken.Value
+ opToken = p.current
+ continue
+ }
+ }
+ break
+ }
+
+ if precedence == 0 {
+ nodeLeft = p.parseConditionalExpression(nodeLeft)
+ }
+
+ return nodeLeft
+}
+
+func (p *parser) parsePrimary() Node {
+ token := p.current
+
+ if token.Is(Operator) {
+ if op, ok := unaryOperators[token.Value]; ok {
+ p.next()
+ expr := p.parseExpression(op.precedence)
+ node := &UnaryNode{
+ Operator: token.Value,
+ Node: expr,
+ }
+ node.SetLocation(token.Location)
+ return p.parsePostfixExpression(node)
+ }
+ }
+
+ if token.Is(Bracket, "(") {
+ p.next()
+ expr := p.parseExpression(0)
+ p.expect(Bracket, ")") // "an opened parenthesis is not properly closed"
+ return p.parsePostfixExpression(expr)
+ }
+
+ if p.depth > 0 {
+ if token.Is(Operator, "#") || token.Is(Operator, ".") {
+ if token.Is(Operator, "#") {
+ p.next()
+ }
+ node := &PointerNode{}
+ node.SetLocation(token.Location)
+ return p.parsePostfixExpression(node)
+ }
+ } else {
+ if token.Is(Operator, "#") || token.Is(Operator, ".") {
+ p.error("cannot use pointer accessor outside closure")
+ }
+ }
+
+ return p.parsePrimaryExpression()
+}
+
+func (p *parser) parseConditionalExpression(node Node) Node {
+ var expr1, expr2 Node
+ for p.current.Is(Operator, "?") && p.err == nil {
+ p.next()
+
+ if !p.current.Is(Operator, ":") {
+ expr1 = p.parseExpression(0)
+ p.expect(Operator, ":")
+ expr2 = p.parseExpression(0)
+ } else {
+ p.next()
+ expr1 = node
+ expr2 = p.parseExpression(0)
+ }
+
+ node = &ConditionalNode{
+ Cond: node,
+ Exp1: expr1,
+ Exp2: expr2,
+ }
+ }
+ return node
+}
+
+func (p *parser) parsePrimaryExpression() Node {
+ var node Node
+ token := p.current
+
+ switch token.Kind {
+
+ case Identifier:
+ p.next()
+ switch token.Value {
+ case "true":
+ node := &BoolNode{Value: true}
+ node.SetLocation(token.Location)
+ return node
+ case "false":
+ node := &BoolNode{Value: false}
+ node.SetLocation(token.Location)
+ return node
+ case "nil":
+ node := &NilNode{}
+ node.SetLocation(token.Location)
+ return node
+ default:
+ node = p.parseIdentifierExpression(token)
+ }
+
+ case Number:
+ p.next()
+ value := strings.Replace(token.Value, "_", "", -1)
+ if strings.Contains(value, "x") {
+ number, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ p.error("invalid hex literal: %v", err)
+ }
+ node := &IntegerNode{Value: int(number)}
+ node.SetLocation(token.Location)
+ return node
+ } else if strings.ContainsAny(value, ".eE") {
+ number, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ p.error("invalid float literal: %v", err)
+ }
+ node := &FloatNode{Value: number}
+ node.SetLocation(token.Location)
+ return node
+ } else {
+ number, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ p.error("invalid integer literal: %v", err)
+ }
+ node := &IntegerNode{Value: int(number)}
+ node.SetLocation(token.Location)
+ return node
+ }
+
+ case String:
+ p.next()
+ node := &StringNode{Value: token.Value}
+ node.SetLocation(token.Location)
+ return node
+
+ default:
+ if token.Is(Bracket, "[") {
+ node = p.parseArrayExpression(token)
+ } else if token.Is(Bracket, "{") {
+ node = p.parseMapExpression(token)
+ } else {
+ p.error("unexpected token %v", token)
+ }
+ }
+
+ return p.parsePostfixExpression(node)
+}
+
+func (p *parser) parseIdentifierExpression(token Token) Node {
+ var node Node
+ if p.current.Is(Bracket, "(") {
+ var arguments []Node
+
+ if b, ok := builtins[token.Value]; ok {
+ p.expect(Bracket, "(")
+ // TODO: Add builtins signatures.
+ if b.arity == 1 {
+ arguments = make([]Node, 1)
+ arguments[0] = p.parseExpression(0)
+ } else if b.arity == 2 {
+ arguments = make([]Node, 2)
+ arguments[0] = p.parseExpression(0)
+ p.expect(Operator, ",")
+ arguments[1] = p.parseClosure()
+ }
+ p.expect(Bracket, ")")
+
+ node = &BuiltinNode{
+ Name: token.Value,
+ Arguments: arguments,
+ }
+ node.SetLocation(token.Location)
+ } else {
+ callee := &IdentifierNode{Value: token.Value}
+ callee.SetLocation(token.Location)
+ node = &CallNode{
+ Callee: callee,
+ Arguments: p.parseArguments(),
+ }
+ node.SetLocation(token.Location)
+ }
+ } else {
+ node = &IdentifierNode{Value: token.Value}
+ node.SetLocation(token.Location)
+ }
+ return node
+}
+
+func (p *parser) parseClosure() Node {
+ startToken := p.current
+ expectClosingBracket := false
+ if p.current.Is(Bracket, "{") {
+ p.next()
+ expectClosingBracket = true
+ }
+
+ p.depth++
+ node := p.parseExpression(0)
+ p.depth--
+
+ if expectClosingBracket {
+ p.expect(Bracket, "}")
+ }
+ closure := &ClosureNode{
+ Node: node,
+ }
+ closure.SetLocation(startToken.Location)
+ return closure
+}
+
+func (p *parser) parseArrayExpression(token Token) Node {
+ nodes := make([]Node, 0)
+
+ p.expect(Bracket, "[")
+ for !p.current.Is(Bracket, "]") && p.err == nil {
+ if len(nodes) > 0 {
+ p.expect(Operator, ",")
+ if p.current.Is(Bracket, "]") {
+ goto end
+ }
+ }
+ node := p.parseExpression(0)
+ nodes = append(nodes, node)
+ }
+end:
+ p.expect(Bracket, "]")
+
+ node := &ArrayNode{Nodes: nodes}
+ node.SetLocation(token.Location)
+ return node
+}
+
+func (p *parser) parseMapExpression(token Token) Node {
+ p.expect(Bracket, "{")
+
+ nodes := make([]Node, 0)
+ for !p.current.Is(Bracket, "}") && p.err == nil {
+ if len(nodes) > 0 {
+ p.expect(Operator, ",")
+ if p.current.Is(Bracket, "}") {
+ goto end
+ }
+ if p.current.Is(Operator, ",") {
+ p.error("unexpected token %v", p.current)
+ }
+ }
+
+ var key Node
+ // Map key can be one of:
+ // * number
+ // * string
+ // * identifier, which is equivalent to a string
+ // * expression, which must be enclosed in parentheses -- (1 + 2)
+ if p.current.Is(Number) || p.current.Is(String) || p.current.Is(Identifier) {
+ key = &StringNode{Value: p.current.Value}
+ key.SetLocation(token.Location)
+ p.next()
+ } else if p.current.Is(Bracket, "(") {
+ key = p.parseExpression(0)
+ } else {
+ p.error("a map key must be a quoted string, a number, a identifier, or an expression enclosed in parentheses (unexpected token %v)", p.current)
+ }
+
+ p.expect(Operator, ":")
+
+ node := p.parseExpression(0)
+ pair := &PairNode{Key: key, Value: node}
+ pair.SetLocation(token.Location)
+ nodes = append(nodes, pair)
+ }
+
+end:
+ p.expect(Bracket, "}")
+
+ node := &MapNode{Pairs: nodes}
+ node.SetLocation(token.Location)
+ return node
+}
+
+func (p *parser) parsePostfixExpression(node Node) Node {
+ postfixToken := p.current
+ for (postfixToken.Is(Operator) || postfixToken.Is(Bracket)) && p.err == nil {
+ if postfixToken.Value == "." || postfixToken.Value == "?." {
+ p.next()
+
+ propertyToken := p.current
+ p.next()
+
+ if propertyToken.Kind != Identifier &&
+ // Operators like "not" and "matches" are valid methods or property names.
+ (propertyToken.Kind != Operator || !isValidIdentifier(propertyToken.Value)) {
+ p.error("expected name")
+ }
+
+ property := &StringNode{Value: propertyToken.Value}
+ property.SetLocation(propertyToken.Location)
+
+ chainNode, isChain := node.(*ChainNode)
+ optional := postfixToken.Value == "?."
+
+ if isChain {
+ node = chainNode.Node
+ }
+
+ memberNode := &MemberNode{
+ Node: node,
+ Property: property,
+ Optional: optional,
+ }
+ memberNode.SetLocation(propertyToken.Location)
+
+ if p.current.Is(Bracket, "(") {
+ node = &CallNode{
+ Callee: memberNode,
+ Arguments: p.parseArguments(),
+ }
+ node.SetLocation(propertyToken.Location)
+ } else {
+ node = memberNode
+ }
+
+ if isChain || optional {
+ node = &ChainNode{Node: node}
+ }
+
+ } else if postfixToken.Value == "[" {
+ p.next()
+ var from, to Node
+
+ if p.current.Is(Operator, ":") { // slice without from [:1]
+ p.next()
+
+ if !p.current.Is(Bracket, "]") { // slice without from and to [:]
+ to = p.parseExpression(0)
+ }
+
+ node = &SliceNode{
+ Node: node,
+ To: to,
+ }
+ node.SetLocation(postfixToken.Location)
+ p.expect(Bracket, "]")
+
+ } else {
+
+ from = p.parseExpression(0)
+
+ if p.current.Is(Operator, ":") {
+ p.next()
+
+ if !p.current.Is(Bracket, "]") { // slice without to [1:]
+ to = p.parseExpression(0)
+ }
+
+ node = &SliceNode{
+ Node: node,
+ From: from,
+ To: to,
+ }
+ node.SetLocation(postfixToken.Location)
+ p.expect(Bracket, "]")
+
+ } else {
+ // Slice operator [:] was not found,
+ // it should be just an index node.
+ node = &MemberNode{
+ Node: node,
+ Property: from,
+ }
+ node.SetLocation(postfixToken.Location)
+ p.expect(Bracket, "]")
+ }
+ }
+ } else {
+ break
+ }
+ postfixToken = p.current
+ }
+ return node
+}
+
+func isValidIdentifier(str string) bool {
+ if len(str) == 0 {
+ return false
+ }
+ h, w := utf8.DecodeRuneInString(str)
+ if !IsAlphabetic(h) {
+ return false
+ }
+ for _, r := range str[w:] {
+ if !IsAlphaNumeric(r) {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *parser) parseArguments() []Node {
+ p.expect(Bracket, "(")
+ nodes := make([]Node, 0)
+ for !p.current.Is(Bracket, ")") && p.err == nil {
+ if len(nodes) > 0 {
+ p.expect(Operator, ",")
+ }
+ node := p.parseExpression(0)
+ nodes = append(nodes, node)
+ }
+ p.expect(Bracket, ")")
+
+ return nodes
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/generated.go b/vendor/github.com/antonmedv/expr/vm/generated.go
new file mode 100644
index 00000000000..9fc7883e2df
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/generated.go
@@ -0,0 +1,262 @@
+// Code generated by vm/func_types/main.go. DO NOT EDIT.
+
+package vm
+
+import (
+ "fmt"
+ "time"
+)
+
+var FuncTypes = []interface{}{
+ 1: new(func() time.Duration),
+ 2: new(func() time.Month),
+ 3: new(func() time.Time),
+ 4: new(func() time.Weekday),
+ 5: new(func() []uint8),
+ 6: new(func() []interface{}),
+ 7: new(func() bool),
+ 8: new(func() uint8),
+ 9: new(func() float64),
+ 10: new(func() int),
+ 11: new(func() int64),
+ 12: new(func() interface{}),
+ 13: new(func() map[string]interface{}),
+ 14: new(func() int32),
+ 15: new(func() string),
+ 16: new(func() uint),
+ 17: new(func() uint64),
+ 18: new(func(time.Duration) time.Duration),
+ 19: new(func(time.Duration) time.Time),
+ 20: new(func(time.Time) time.Duration),
+ 21: new(func(time.Time) bool),
+ 22: new(func([]interface{}, string) string),
+ 23: new(func([]string, string) string),
+ 24: new(func(bool) bool),
+ 25: new(func(bool) float64),
+ 26: new(func(bool) int),
+ 27: new(func(bool) string),
+ 28: new(func(float64) bool),
+ 29: new(func(float64) float64),
+ 30: new(func(float64) int),
+ 31: new(func(float64) string),
+ 32: new(func(int) bool),
+ 33: new(func(int) float64),
+ 34: new(func(int) int),
+ 35: new(func(int) string),
+ 36: new(func(int, int) int),
+ 37: new(func(int, int) string),
+ 38: new(func(int64) time.Time),
+ 39: new(func(string) []string),
+ 40: new(func(string) bool),
+ 41: new(func(string) float64),
+ 42: new(func(string) int),
+ 43: new(func(string) string),
+ 44: new(func(string, uint8) int),
+ 45: new(func(string, int) int),
+ 46: new(func(string, int32) int),
+ 47: new(func(string, string) bool),
+ 48: new(func(string, string) string),
+ 49: new(func(interface{}) bool),
+ 50: new(func(interface{}) float64),
+ 51: new(func(interface{}) int),
+ 52: new(func(interface{}) string),
+ 53: new(func(interface{}) interface{}),
+ 54: new(func(interface{}) []interface{}),
+ 55: new(func(interface{}) map[string]interface{}),
+ 56: new(func([]interface{}) interface{}),
+ 57: new(func([]interface{}) []interface{}),
+ 58: new(func([]interface{}) map[string]interface{}),
+ 59: new(func(interface{}, interface{}) bool),
+ 60: new(func(interface{}, interface{}) string),
+ 61: new(func(interface{}, interface{}) interface{}),
+ 62: new(func(interface{}, interface{}) []interface{}),
+}
+
+func (vm *VM) call(fn interface{}, kind int) interface{} {
+ switch kind {
+ case 1:
+ return fn.(func() time.Duration)()
+ case 2:
+ return fn.(func() time.Month)()
+ case 3:
+ return fn.(func() time.Time)()
+ case 4:
+ return fn.(func() time.Weekday)()
+ case 5:
+ return fn.(func() []uint8)()
+ case 6:
+ return fn.(func() []interface{})()
+ case 7:
+ return fn.(func() bool)()
+ case 8:
+ return fn.(func() uint8)()
+ case 9:
+ return fn.(func() float64)()
+ case 10:
+ return fn.(func() int)()
+ case 11:
+ return fn.(func() int64)()
+ case 12:
+ return fn.(func() interface{})()
+ case 13:
+ return fn.(func() map[string]interface{})()
+ case 14:
+ return fn.(func() int32)()
+ case 15:
+ return fn.(func() string)()
+ case 16:
+ return fn.(func() uint)()
+ case 17:
+ return fn.(func() uint64)()
+ case 18:
+ arg1 := vm.pop().(time.Duration)
+ return fn.(func(time.Duration) time.Duration)(arg1)
+ case 19:
+ arg1 := vm.pop().(time.Duration)
+ return fn.(func(time.Duration) time.Time)(arg1)
+ case 20:
+ arg1 := vm.pop().(time.Time)
+ return fn.(func(time.Time) time.Duration)(arg1)
+ case 21:
+ arg1 := vm.pop().(time.Time)
+ return fn.(func(time.Time) bool)(arg1)
+ case 22:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}, string) string)(arg1, arg2)
+ case 23:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().([]string)
+ return fn.(func([]string, string) string)(arg1, arg2)
+ case 24:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) bool)(arg1)
+ case 25:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) float64)(arg1)
+ case 26:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) int)(arg1)
+ case 27:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) string)(arg1)
+ case 28:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) bool)(arg1)
+ case 29:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) float64)(arg1)
+ case 30:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) int)(arg1)
+ case 31:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) string)(arg1)
+ case 32:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) bool)(arg1)
+ case 33:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) float64)(arg1)
+ case 34:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) int)(arg1)
+ case 35:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) string)(arg1)
+ case 36:
+ arg2 := vm.pop().(int)
+ arg1 := vm.pop().(int)
+ return fn.(func(int, int) int)(arg1, arg2)
+ case 37:
+ arg2 := vm.pop().(int)
+ arg1 := vm.pop().(int)
+ return fn.(func(int, int) string)(arg1, arg2)
+ case 38:
+ arg1 := vm.pop().(int64)
+ return fn.(func(int64) time.Time)(arg1)
+ case 39:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) []string)(arg1)
+ case 40:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) bool)(arg1)
+ case 41:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) float64)(arg1)
+ case 42:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) int)(arg1)
+ case 43:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) string)(arg1)
+ case 44:
+ arg2 := vm.pop().(uint8)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, uint8) int)(arg1, arg2)
+ case 45:
+ arg2 := vm.pop().(int)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, int) int)(arg1, arg2)
+ case 46:
+ arg2 := vm.pop().(int32)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, int32) int)(arg1, arg2)
+ case 47:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, string) bool)(arg1, arg2)
+ case 48:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, string) string)(arg1, arg2)
+ case 49:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) bool)(arg1)
+ case 50:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) float64)(arg1)
+ case 51:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) int)(arg1)
+ case 52:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) string)(arg1)
+ case 53:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) interface{})(arg1)
+ case 54:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) []interface{})(arg1)
+ case 55:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) map[string]interface{})(arg1)
+ case 56:
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}) interface{})(arg1)
+ case 57:
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}) []interface{})(arg1)
+ case 58:
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}) map[string]interface{})(arg1)
+ case 59:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) bool)(arg1, arg2)
+ case 60:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) string)(arg1, arg2)
+ case 61:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) interface{})(arg1, arg2)
+ case 62:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) []interface{})(arg1, arg2)
+
+ }
+ panic(fmt.Sprintf("unknown function kind (%v)", kind))
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/opcodes.go b/vendor/github.com/antonmedv/expr/vm/opcodes.go
new file mode 100644
index 00000000000..b3117e73c2b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/opcodes.go
@@ -0,0 +1,71 @@
+package vm
+
+type Opcode byte
+
+const (
+ OpPush Opcode = iota
+ OpPushInt
+ OpPop
+ OpLoadConst
+ OpLoadField
+ OpLoadFast
+ OpLoadMethod
+ OpLoadFunc
+ OpFetch
+ OpFetchField
+ OpMethod
+ OpTrue
+ OpFalse
+ OpNil
+ OpNegate
+ OpNot
+ OpEqual
+ OpEqualInt
+ OpEqualString
+ OpJump
+ OpJumpIfTrue
+ OpJumpIfFalse
+ OpJumpIfNil
+ OpJumpIfNotNil
+ OpJumpIfEnd
+ OpJumpBackward
+ OpIn
+ OpLess
+ OpMore
+ OpLessOrEqual
+ OpMoreOrEqual
+ OpAdd
+ OpSubtract
+ OpMultiply
+ OpDivide
+ OpModulo
+ OpExponent
+ OpRange
+ OpMatches
+ OpMatchesConst
+ OpContains
+ OpStartsWith
+ OpEndsWith
+ OpSlice
+ OpCall
+ OpCall0
+ OpCall1
+ OpCall2
+ OpCall3
+ OpCallN
+ OpCallFast
+ OpCallTyped
+ OpBuiltin
+ OpArray
+ OpMap
+ OpLen
+ OpCast
+ OpDeref
+ OpIncrementIt
+ OpIncrementCount
+ OpGetCount
+ OpGetLen
+ OpPointer
+ OpBegin
+ OpEnd // This opcode must be at the end of this list.
+)
diff --git a/vendor/github.com/antonmedv/expr/vm/program.go b/vendor/github.com/antonmedv/expr/vm/program.go
new file mode 100644
index 00000000000..d424df14f47
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/program.go
@@ -0,0 +1,278 @@
+package vm
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+type Program struct {
+ Node ast.Node
+ Source *file.Source
+ Locations []file.Location
+ Constants []interface{}
+ Bytecode []Opcode
+ Arguments []int
+ Functions []Function
+}
+
+func (program *Program) Disassemble() string {
+ var buf bytes.Buffer
+ w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0)
+ ip := 0
+ for ip < len(program.Bytecode) {
+ pp := ip
+ op := program.Bytecode[ip]
+ arg := program.Arguments[ip]
+ ip += 1
+
+ code := func(label string) {
+ _, _ = fmt.Fprintf(w, "%v\t%v\n", pp, label)
+ }
+ jump := func(label string) {
+ _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t(%v)\n", pp, label, arg, ip+arg)
+ }
+ jumpBack := func(label string) {
+ _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t(%v)\n", pp, label, arg, ip-arg)
+ }
+ argument := func(label string) {
+ _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\n", pp, label, arg)
+ }
+ constant := func(label string) {
+ var c interface{}
+ if arg < len(program.Constants) {
+ c = program.Constants[arg]
+ } else {
+ c = "out of range"
+ }
+ if r, ok := c.(*regexp.Regexp); ok {
+ c = r.String()
+ }
+ if field, ok := c.(*runtime.Field); ok {
+ c = fmt.Sprintf("{%v %v}", strings.Join(field.Path, "."), field.Index)
+ }
+ if method, ok := c.(*runtime.Method); ok {
+ c = fmt.Sprintf("{%v %v}", method.Name, method.Index)
+ }
+ _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t%v\n", pp, label, arg, c)
+ }
+ builtIn := func(label string) {
+ f, ok := builtin.Builtins[arg]
+ if !ok {
+ panic(fmt.Sprintf("unknown builtin %v", arg))
+ }
+ _, _ = fmt.Fprintf(w, "%v\t%v\t%v\n", pp, "OpBuiltin", f.Name)
+ }
+
+ switch op {
+ case OpPush:
+ constant("OpPush")
+
+ case OpPushInt:
+ argument("OpPushInt")
+
+ case OpPop:
+ code("OpPop")
+
+ case OpLoadConst:
+ constant("OpLoadConst")
+
+ case OpLoadField:
+ constant("OpLoadField")
+
+ case OpLoadFast:
+ constant("OpLoadFast")
+
+ case OpLoadMethod:
+ constant("OpLoadMethod")
+
+ case OpLoadFunc:
+ argument("OpLoadFunc")
+
+ case OpFetch:
+ code("OpFetch")
+
+ case OpFetchField:
+ constant("OpFetchField")
+
+ case OpMethod:
+ constant("OpMethod")
+
+ case OpTrue:
+ code("OpTrue")
+
+ case OpFalse:
+ code("OpFalse")
+
+ case OpNil:
+ code("OpNil")
+
+ case OpNegate:
+ code("OpNegate")
+
+ case OpNot:
+ code("OpNot")
+
+ case OpEqual:
+ code("OpEqual")
+
+ case OpEqualInt:
+ code("OpEqualInt")
+
+ case OpEqualString:
+ code("OpEqualString")
+
+ case OpJump:
+ jump("OpJump")
+
+ case OpJumpIfTrue:
+ jump("OpJumpIfTrue")
+
+ case OpJumpIfFalse:
+ jump("OpJumpIfFalse")
+
+ case OpJumpIfNil:
+ jump("OpJumpIfNil")
+
+ case OpJumpIfNotNil:
+ jump("OpJumpIfNotNil")
+
+ case OpJumpIfEnd:
+ jump("OpJumpIfEnd")
+
+ case OpJumpBackward:
+ jumpBack("OpJumpBackward")
+
+ case OpIn:
+ code("OpIn")
+
+ case OpLess:
+ code("OpLess")
+
+ case OpMore:
+ code("OpMore")
+
+ case OpLessOrEqual:
+ code("OpLessOrEqual")
+
+ case OpMoreOrEqual:
+ code("OpMoreOrEqual")
+
+ case OpAdd:
+ code("OpAdd")
+
+ case OpSubtract:
+ code("OpSubtract")
+
+ case OpMultiply:
+ code("OpMultiply")
+
+ case OpDivide:
+ code("OpDivide")
+
+ case OpModulo:
+ code("OpModulo")
+
+ case OpExponent:
+ code("OpExponent")
+
+ case OpRange:
+ code("OpRange")
+
+ case OpMatches:
+ code("OpMatches")
+
+ case OpMatchesConst:
+ constant("OpMatchesConst")
+
+ case OpContains:
+ code("OpContains")
+
+ case OpStartsWith:
+ code("OpStartsWith")
+
+ case OpEndsWith:
+ code("OpEndsWith")
+
+ case OpSlice:
+ code("OpSlice")
+
+ case OpCall:
+ argument("OpCall")
+
+ case OpCall0:
+ argument("OpCall0")
+
+ case OpCall1:
+ argument("OpCall1")
+
+ case OpCall2:
+ argument("OpCall2")
+
+ case OpCall3:
+ argument("OpCall3")
+
+ case OpCallN:
+ argument("OpCallN")
+
+ case OpCallFast:
+ argument("OpCallFast")
+
+ case OpCallTyped:
+ signature := reflect.TypeOf(FuncTypes[arg]).Elem().String()
+ _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t%v\n", pp, "OpCallTyped", arg, signature)
+
+ case OpBuiltin:
+ builtIn("OpBuiltin")
+
+ case OpArray:
+ code("OpArray")
+
+ case OpMap:
+ code("OpMap")
+
+ case OpLen:
+ code("OpLen")
+
+ case OpCast:
+ argument("OpCast")
+
+ case OpDeref:
+ code("OpDeref")
+
+ case OpIncrementIt:
+ code("OpIncrementIt")
+
+ case OpIncrementCount:
+ code("OpIncrementCount")
+
+ case OpGetCount:
+ code("OpGetCount")
+
+ case OpGetLen:
+ code("OpGetLen")
+
+ case OpPointer:
+ code("OpPointer")
+
+ case OpBegin:
+ code("OpBegin")
+
+ case OpEnd:
+ code("OpEnd")
+
+ default:
+ _, _ = fmt.Fprintf(w, "%v\t%#x\n", ip, op)
+ }
+ }
+ _ = w.Flush()
+ return buf.String()
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/generated.go b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go
new file mode 100644
index 00000000000..09a4a200ed2
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go
@@ -0,0 +1,3288 @@
+// Code generated by vm/runtime/helpers/main.go. DO NOT EDIT.
+
+package runtime
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+func Equal(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) == float64(y)
+ case uint8:
+ return float64(x) == float64(y)
+ case uint16:
+ return float64(x) == float64(y)
+ case uint32:
+ return float64(x) == float64(y)
+ case uint64:
+ return float64(x) == float64(y)
+ case int:
+ return float64(x) == float64(y)
+ case int8:
+ return float64(x) == float64(y)
+ case int16:
+ return float64(x) == float64(y)
+ case int32:
+ return float64(x) == float64(y)
+ case int64:
+ return float64(x) == float64(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) == float64(y)
+ case uint8:
+ return float64(x) == float64(y)
+ case uint16:
+ return float64(x) == float64(y)
+ case uint32:
+ return float64(x) == float64(y)
+ case uint64:
+ return float64(x) == float64(y)
+ case int:
+ return float64(x) == float64(y)
+ case int8:
+ return float64(x) == float64(y)
+ case int16:
+ return float64(x) == float64(y)
+ case int32:
+ return float64(x) == float64(y)
+ case int64:
+ return float64(x) == float64(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x == y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.Equal(y)
+ }
+ }
+ if IsNil(a) && IsNil(b) {
+ return true
+ }
+ return reflect.DeepEqual(a, b)
+}
+
+func Less(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) < float64(y)
+ case uint8:
+ return float64(x) < float64(y)
+ case uint16:
+ return float64(x) < float64(y)
+ case uint32:
+ return float64(x) < float64(y)
+ case uint64:
+ return float64(x) < float64(y)
+ case int:
+ return float64(x) < float64(y)
+ case int8:
+ return float64(x) < float64(y)
+ case int16:
+ return float64(x) < float64(y)
+ case int32:
+ return float64(x) < float64(y)
+ case int64:
+ return float64(x) < float64(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) < float64(y)
+ case uint8:
+ return float64(x) < float64(y)
+ case uint16:
+ return float64(x) < float64(y)
+ case uint32:
+ return float64(x) < float64(y)
+ case uint64:
+ return float64(x) < float64(y)
+ case int:
+ return float64(x) < float64(y)
+ case int8:
+ return float64(x) < float64(y)
+ case int16:
+ return float64(x) < float64(y)
+ case int32:
+ return float64(x) < float64(y)
+ case int64:
+ return float64(x) < float64(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x < y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.Before(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T < %T", a, b))
+}
+
// More reports whether a is ordered after b. Numeric operands of any
// integer or float type may be mixed: pairs of integer kinds compare
// through int, and any pairing that involves a float compares through
// float64. Strings compare lexicographically and time.Time values via
// After. Every other operand combination panics.
//
// NOTE(review): uint64 values above the int range wrap when converted to
// int for integer/integer comparisons — this mirrors the original
// generated table; confirm against upstream before changing.
func More(a, b interface{}) bool {
	asInt := func(v interface{}) (int, bool) {
		switch n := v.(type) {
		case uint:
			return int(n), true
		case uint8:
			return int(n), true
		case uint16:
			return int(n), true
		case uint32:
			return int(n), true
		case uint64:
			return int(n), true
		case int:
			return n, true
		case int8:
			return int(n), true
		case int16:
			return int(n), true
		case int32:
			return int(n), true
		case int64:
			return int(n), true
		}
		return 0, false
	}
	asFloat := func(v interface{}) (float64, bool) {
		switch n := v.(type) {
		case uint:
			return float64(n), true
		case uint8:
			return float64(n), true
		case uint16:
			return float64(n), true
		case uint32:
			return float64(n), true
		case uint64:
			return float64(n), true
		case int:
			return float64(n), true
		case int8:
			return float64(n), true
		case int16:
			return float64(n), true
		case int32:
			return float64(n), true
		case int64:
			return float64(n), true
		case float32:
			return float64(n), true
		case float64:
			return n, true
		}
		return 0, false
	}
	// Integer/integer pairs go through int, exactly like the generated
	// type-switch matrix; anything involving a float goes through float64.
	if xi, xOK := asInt(a); xOK {
		if yi, yOK := asInt(b); yOK {
			return xi > yi
		}
	}
	if xf, xOK := asFloat(a); xOK {
		if yf, yOK := asFloat(b); yOK {
			return xf > yf
		}
	}
	switch x := a.(type) {
	case string:
		if y, ok := b.(string); ok {
			return x > y
		}
	case time.Time:
		if y, ok := b.(time.Time); ok {
			return x.After(y)
		}
	}
	panic(fmt.Sprintf("invalid operation: %T > %T", a, b))
}
+
// LessOrEqual reports whether a is ordered before b or equal to it.
// Numeric operands of any integer or float type may be mixed: pairs of
// integer kinds compare through int, and any pairing that involves a
// float compares through float64. Strings compare lexicographically and
// time.Time values via Before/Equal. Every other operand combination
// panics.
//
// NOTE(review): uint64 values above the int range wrap when converted to
// int for integer/integer comparisons — this mirrors the original
// generated table; confirm against upstream before changing.
func LessOrEqual(a, b interface{}) bool {
	asInt := func(v interface{}) (int, bool) {
		switch n := v.(type) {
		case uint:
			return int(n), true
		case uint8:
			return int(n), true
		case uint16:
			return int(n), true
		case uint32:
			return int(n), true
		case uint64:
			return int(n), true
		case int:
			return n, true
		case int8:
			return int(n), true
		case int16:
			return int(n), true
		case int32:
			return int(n), true
		case int64:
			return int(n), true
		}
		return 0, false
	}
	asFloat := func(v interface{}) (float64, bool) {
		switch n := v.(type) {
		case uint:
			return float64(n), true
		case uint8:
			return float64(n), true
		case uint16:
			return float64(n), true
		case uint32:
			return float64(n), true
		case uint64:
			return float64(n), true
		case int:
			return float64(n), true
		case int8:
			return float64(n), true
		case int16:
			return float64(n), true
		case int32:
			return float64(n), true
		case int64:
			return float64(n), true
		case float32:
			return float64(n), true
		case float64:
			return n, true
		}
		return 0, false
	}
	// Integer/integer pairs go through int, exactly like the generated
	// type-switch matrix; anything involving a float goes through float64.
	if xi, xOK := asInt(a); xOK {
		if yi, yOK := asInt(b); yOK {
			return xi <= yi
		}
	}
	if xf, xOK := asFloat(a); xOK {
		if yf, yOK := asFloat(b); yOK {
			return xf <= yf
		}
	}
	switch x := a.(type) {
	case string:
		if y, ok := b.(string); ok {
			return x <= y
		}
	case time.Time:
		if y, ok := b.(time.Time); ok {
			return x.Before(y) || x.Equal(y)
		}
	}
	panic(fmt.Sprintf("invalid operation: %T <= %T", a, b))
}
+
// MoreOrEqual reports whether a is ordered after b or equal to it.
// Numeric operands of any integer or float type may be mixed: pairs of
// integer kinds compare through int, and any pairing that involves a
// float compares through float64. Strings compare lexicographically and
// time.Time values via After/Equal. Every other operand combination
// panics.
//
// NOTE(review): uint64 values above the int range wrap when converted to
// int for integer/integer comparisons — this mirrors the original
// generated table; confirm against upstream before changing.
func MoreOrEqual(a, b interface{}) bool {
	asInt := func(v interface{}) (int, bool) {
		switch n := v.(type) {
		case uint:
			return int(n), true
		case uint8:
			return int(n), true
		case uint16:
			return int(n), true
		case uint32:
			return int(n), true
		case uint64:
			return int(n), true
		case int:
			return n, true
		case int8:
			return int(n), true
		case int16:
			return int(n), true
		case int32:
			return int(n), true
		case int64:
			return int(n), true
		}
		return 0, false
	}
	asFloat := func(v interface{}) (float64, bool) {
		switch n := v.(type) {
		case uint:
			return float64(n), true
		case uint8:
			return float64(n), true
		case uint16:
			return float64(n), true
		case uint32:
			return float64(n), true
		case uint64:
			return float64(n), true
		case int:
			return float64(n), true
		case int8:
			return float64(n), true
		case int16:
			return float64(n), true
		case int32:
			return float64(n), true
		case int64:
			return float64(n), true
		case float32:
			return float64(n), true
		case float64:
			return n, true
		}
		return 0, false
	}
	// Integer/integer pairs go through int, exactly like the generated
	// type-switch matrix; anything involving a float goes through float64.
	if xi, xOK := asInt(a); xOK {
		if yi, yOK := asInt(b); yOK {
			return xi >= yi
		}
	}
	if xf, xOK := asFloat(a); xOK {
		if yf, yOK := asFloat(b); yOK {
			return xf >= yf
		}
	}
	switch x := a.(type) {
	case string:
		if y, ok := b.(string); ok {
			return x >= y
		}
	case time.Time:
		if y, ok := b.(time.Time); ok {
			return x.After(y) || x.Equal(y)
		}
	}
	panic(fmt.Sprintf("invalid operation: %T >= %T", a, b))
}
+
+func Add(a, b interface{}) interface{} {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) + float64(y)
+ case uint8:
+ return float64(x) + float64(y)
+ case uint16:
+ return float64(x) + float64(y)
+ case uint32:
+ return float64(x) + float64(y)
+ case uint64:
+ return float64(x) + float64(y)
+ case int:
+ return float64(x) + float64(y)
+ case int8:
+ return float64(x) + float64(y)
+ case int16:
+ return float64(x) + float64(y)
+ case int32:
+ return float64(x) + float64(y)
+ case int64:
+ return float64(x) + float64(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) + float64(y)
+ case uint8:
+ return float64(x) + float64(y)
+ case uint16:
+ return float64(x) + float64(y)
+ case uint32:
+ return float64(x) + float64(y)
+ case uint64:
+ return float64(x) + float64(y)
+ case int:
+ return float64(x) + float64(y)
+ case int8:
+ return float64(x) + float64(y)
+ case int16:
+ return float64(x) + float64(y)
+ case int32:
+ return float64(x) + float64(y)
+ case int64:
+ return float64(x) + float64(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x + y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Duration:
+ return x.Add(y)
+ }
+ case time.Duration:
+ switch y := b.(type) {
+ case time.Time:
+ return y.Add(x)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T + %T", a, b))
+}
+
// Subtract implements the expression engine's "-" operator for dynamically
// typed operands. Two integer operands (of any signed/unsigned width) yield
// an int; if either operand is a float32/float64 the result widens to
// float64; two time.Time values yield the time.Duration between them.
// Every other combination panics with an "invalid operation" message.
func Subtract(a, b interface{}) interface{} {
	// Time is handled apart from the numeric tower: the only defined form
	// is time.Time - time.Time, which produces a time.Duration.
	if lhs, ok := a.(time.Time); ok {
		if rhs, ok := b.(time.Time); ok {
			return lhs.Sub(rhs)
		}
		panic(fmt.Sprintf("invalid operation: %T - %T", a, b))
	}

	const (
		notNum = iota
		intKind
		floatKind
	)
	// classify maps a value onto the numeric tower, producing both an int
	// view and a float64 view. Both views are converted directly from the
	// operand's original type, so e.g. a uint64 larger than MaxInt keeps
	// its full magnitude on the float path (matching the generated code
	// this replaces, which never routed floats through int).
	classify := func(v interface{}) (int, float64, int) {
		switch n := v.(type) {
		case uint:
			return int(n), float64(n), intKind
		case uint8:
			return int(n), float64(n), intKind
		case uint16:
			return int(n), float64(n), intKind
		case uint32:
			return int(n), float64(n), intKind
		case uint64:
			return int(n), float64(n), intKind
		case int:
			return n, float64(n), intKind
		case int8:
			return int(n), float64(n), intKind
		case int16:
			return int(n), float64(n), intKind
		case int32:
			return int(n), float64(n), intKind
		case int64:
			return int(n), float64(n), intKind
		case float32:
			return 0, float64(n), floatKind
		case float64:
			return 0, n, floatKind
		}
		return 0, 0, notNum
	}

	li, lf, lk := classify(a)
	ri, rf, rk := classify(b)
	switch {
	case lk == intKind && rk == intKind:
		return li - ri
	case lk != notNum && rk != notNum:
		// At least one side is a float, so the result widens to float64.
		return lf - rf
	}
	panic(fmt.Sprintf("invalid operation: %T - %T", a, b))
}
+
// Multiply implements the expression engine's "*" operator for dynamically
// typed numeric operands. Two integer operands (of any signed/unsigned
// width) yield an int; if either operand is a float32/float64 the result
// widens to float64. Any non-numeric operand causes a panic with an
// "invalid operation" message.
func Multiply(a, b interface{}) interface{} {
	const (
		notNum = iota
		intKind
		floatKind
	)
	// classify maps a value onto the numeric tower, producing both an int
	// view and a float64 view. Both views are converted directly from the
	// operand's original type, so the float path never loses magnitude to
	// an intermediate int conversion (matching the generated original).
	classify := func(v interface{}) (int, float64, int) {
		switch n := v.(type) {
		case uint:
			return int(n), float64(n), intKind
		case uint8:
			return int(n), float64(n), intKind
		case uint16:
			return int(n), float64(n), intKind
		case uint32:
			return int(n), float64(n), intKind
		case uint64:
			return int(n), float64(n), intKind
		case int:
			return n, float64(n), intKind
		case int8:
			return int(n), float64(n), intKind
		case int16:
			return int(n), float64(n), intKind
		case int32:
			return int(n), float64(n), intKind
		case int64:
			return int(n), float64(n), intKind
		case float32:
			return 0, float64(n), floatKind
		case float64:
			return 0, n, floatKind
		}
		return 0, 0, notNum
	}

	li, lf, lk := classify(a)
	ri, rf, rk := classify(b)
	switch {
	case lk == intKind && rk == intKind:
		return li * ri
	case lk != notNum && rk != notNum:
		// At least one side is a float, so the result widens to float64.
		return lf * rf
	}
	panic(fmt.Sprintf("invalid operation: %T * %T", a, b))
}
+
// Divide implements the expression engine's "/" operator. Unlike the other
// arithmetic helpers, division is always performed in float64, regardless
// of the operand types, so integer operands do not truncate (and division
// by zero follows IEEE-754 semantics: +/-Inf or NaN). Any non-numeric
// operand causes a panic with an "invalid operation" message.
func Divide(a, b interface{}) float64 {
	// toFloat converts any supported numeric type directly to float64;
	// the second result reports whether the value was numeric at all.
	toFloat := func(v interface{}) (float64, bool) {
		switch n := v.(type) {
		case uint:
			return float64(n), true
		case uint8:
			return float64(n), true
		case uint16:
			return float64(n), true
		case uint32:
			return float64(n), true
		case uint64:
			return float64(n), true
		case int:
			return float64(n), true
		case int8:
			return float64(n), true
		case int16:
			return float64(n), true
		case int32:
			return float64(n), true
		case int64:
			return float64(n), true
		case float32:
			return float64(n), true
		case float64:
			return n, true
		}
		return 0, false
	}

	num, okA := toFloat(a)
	den, okB := toFloat(b)
	if okA && okB {
		return num / den
	}
	panic(fmt.Sprintf("invalid operation: %T / %T", a, b))
}
+
// Modulo implements the expression engine's "%" operator. It is defined
// only for integer operands (of any signed/unsigned width); both sides are
// converted to int and the Go "%" operator is applied, so the result takes
// the sign of the dividend. Float or non-numeric operands cause a panic
// with an "invalid operation" message. (Modulo by zero panics with Go's
// usual integer-divide-by-zero runtime error, as in the generated
// original.)
func Modulo(a, b interface{}) int {
	// toInt converts any supported integer type to int; the second result
	// reports whether the value was an integer at all. Note that floats
	// are deliberately excluded — "%" is not defined on them.
	toInt := func(v interface{}) (int, bool) {
		switch n := v.(type) {
		case uint:
			return int(n), true
		case uint8:
			return int(n), true
		case uint16:
			return int(n), true
		case uint32:
			return int(n), true
		case uint64:
			return int(n), true
		case int:
			return n, true
		case int8:
			return int(n), true
		case int16:
			return int(n), true
		case int32:
			return int(n), true
		case int64:
			return int(n), true
		}
		return 0, false
	}

	lhs, okA := toInt(a)
	rhs, okB := toInt(b)
	if okA && okB {
		return lhs % rhs
	}
	panic(fmt.Sprintf("invalid operation: %T %% %T", a, b))
}
diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go
new file mode 100644
index 00000000000..b2eeb65d83c
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go
@@ -0,0 +1,517 @@
+package runtime
+
+//go:generate sh -c "go run ./helpers > ./generated.go"
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+)
+
+func Fetch(from, i interface{}) interface{} {
+ v := reflect.ValueOf(from)
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ panic(fmt.Sprintf("cannot fetch %v from %T", i, from))
+ }
+
+ // Methods can be defined on any type.
+ if v.NumMethod() > 0 {
+ if methodName, ok := i.(string); ok {
+ method := v.MethodByName(methodName)
+ if method.IsValid() {
+ return method.Interface()
+ }
+ }
+ }
+
+	// Structs, maps, and slices can be accessed through a pointer or through
+	// a value; when they are accessed through a pointer we don't want to
+	// copy them to a value.
+ if kind == reflect.Ptr {
+ v = reflect.Indirect(v)
+ kind = v.Kind()
+ }
+
+	// TODO: We can create separate opcodes for each of the cases below to make
+	// the fetch a little bit faster.
+ switch kind {
+ case reflect.Array, reflect.Slice, reflect.String:
+ index := ToInt(i)
+ if index < 0 {
+ index = v.Len() + index
+ }
+ value := v.Index(index)
+ if value.IsValid() {
+ return value.Interface()
+ }
+
+ case reflect.Map:
+ var value reflect.Value
+ if i == nil {
+ value = v.MapIndex(reflect.Zero(v.Type().Key()))
+ } else {
+ value = v.MapIndex(reflect.ValueOf(i))
+ }
+ if value.IsValid() {
+ return value.Interface()
+ } else {
+ elem := reflect.TypeOf(from).Elem()
+ return reflect.Zero(elem).Interface()
+ }
+
+ case reflect.Struct:
+ fieldName := i.(string)
+ value := v.FieldByNameFunc(func(name string) bool {
+ field, _ := v.Type().FieldByName(name)
+ if field.Tag.Get("expr") == fieldName {
+ return true
+ }
+ return name == fieldName
+ })
+ if value.IsValid() {
+ return value.Interface()
+ }
+ }
+ panic(fmt.Sprintf("cannot fetch %v from %T", i, from))
+}
+
+type Field struct {
+ Index []int
+ Path []string
+}
+
+func FetchField(from interface{}, field *Field) interface{} {
+ v := reflect.ValueOf(from)
+ kind := v.Kind()
+ if kind != reflect.Invalid {
+ if kind == reflect.Ptr {
+ v = reflect.Indirect(v)
+ }
+		// We can use v.FieldByIndex here, but it will panic if the field
+		// does not exist, and we would need to recover() to generate a more
+		// user-friendly error message.
+		// Also, our fieldByIndex() function is slightly faster than the
+		// v.FieldByIndex() function as we don't need to verify that a field
+		// is a struct, as we already did that at the compilation step.
+ value := fieldByIndex(v, field)
+ if value.IsValid() {
+ return value.Interface()
+ }
+ }
+ panic(fmt.Sprintf("cannot get %v from %T", field.Path[0], from))
+}
+
+func fieldByIndex(v reflect.Value, field *Field) reflect.Value {
+ if len(field.Index) == 1 {
+ return v.Field(field.Index[0])
+ }
+ for i, x := range field.Index {
+ if i > 0 {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ panic(fmt.Sprintf("cannot get %v from %v", field.Path[i], field.Path[i-1]))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+type Method struct {
+ Index int
+ Name string
+}
+
+func FetchMethod(from interface{}, method *Method) interface{} {
+ v := reflect.ValueOf(from)
+ kind := v.Kind()
+ if kind != reflect.Invalid {
+ // Methods can be defined on any type, no need to dereference.
+ method := v.Method(method.Index)
+ if method.IsValid() {
+ return method.Interface()
+ }
+ }
+ panic(fmt.Sprintf("cannot fetch %v from %T", method.Name, from))
+}
+
+func Deref(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i)
+
+ if v.Kind() == reflect.Interface {
+ if v.IsNil() {
+ return i
+ }
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return i
+ }
+ indirect := reflect.Indirect(v)
+ switch indirect.Kind() {
+ case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice:
+ default:
+ v = v.Elem()
+ }
+ }
+
+ if v.IsValid() {
+ return v.Interface()
+ }
+
+ panic(fmt.Sprintf("cannot dereference %v", i))
+}
+
+func Slice(array, from, to interface{}) interface{} {
+ v := reflect.ValueOf(array)
+
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ length := v.Len()
+ a, b := ToInt(from), ToInt(to)
+ if a < 0 {
+ a = length + a
+ }
+ if b < 0 {
+ b = length + b
+ }
+ if b > length {
+ b = length
+ }
+ if a > b {
+ a = b
+ }
+ value := v.Slice(a, b)
+ if value.IsValid() {
+ return value.Interface()
+ }
+
+ case reflect.Ptr:
+ value := v.Elem()
+ if value.IsValid() {
+ return Slice(value.Interface(), from, to)
+ }
+
+ }
+ panic(fmt.Sprintf("cannot slice %v", from))
+}
+
+func In(needle interface{}, array interface{}) bool {
+ if array == nil {
+ return false
+ }
+ v := reflect.ValueOf(array)
+
+ switch v.Kind() {
+
+ case reflect.Array, reflect.Slice:
+ for i := 0; i < v.Len(); i++ {
+ value := v.Index(i)
+ if value.IsValid() {
+ if Equal(value.Interface(), needle) {
+ return true
+ }
+ }
+ }
+ return false
+
+ case reflect.Map:
+ var value reflect.Value
+ if needle == nil {
+ value = v.MapIndex(reflect.Zero(v.Type().Key()))
+ } else {
+ n := reflect.ValueOf(needle)
+ if !n.IsValid() {
+ panic(fmt.Sprintf("cannot use %T as index to %T", needle, array))
+ }
+ value = v.MapIndex(n)
+ }
+ if value.IsValid() {
+ return true
+ }
+ return false
+
+ case reflect.Struct:
+ n := reflect.ValueOf(needle)
+ if !n.IsValid() || n.Kind() != reflect.String {
+ panic(fmt.Sprintf("cannot use %T as field name of %T", needle, array))
+ }
+ value := v.FieldByName(n.String())
+ if value.IsValid() {
+ return true
+ }
+ return false
+
+ case reflect.Ptr:
+ value := v.Elem()
+ if value.IsValid() {
+ return In(needle, value.Interface())
+ }
+ return false
+ }
+
+ panic(fmt.Sprintf(`operator "in"" not defined on %T`, array))
+}
+
+func Len(a interface{}) interface{} {
+ v := reflect.ValueOf(a)
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return v.Len()
+ default:
+ panic(fmt.Sprintf("invalid argument for len (type %T)", a))
+ }
+}
+
+func Negate(i interface{}) interface{} {
+ switch v := i.(type) {
+ case float32:
+ return -v
+ case float64:
+ return -v
+ case int:
+ return -v
+ case int8:
+ return -v
+ case int16:
+ return -v
+ case int32:
+ return -v
+ case int64:
+ return -v
+ case uint:
+ return -v
+ case uint8:
+ return -v
+ case uint16:
+ return -v
+ case uint32:
+ return -v
+ case uint64:
+ return -v
+ default:
+ panic(fmt.Sprintf("invalid operation: - %T", v))
+ }
+}
+
+func Exponent(a, b interface{}) float64 {
+ return math.Pow(ToFloat64(a), ToFloat64(b))
+}
+
+func MakeRange(min, max int) []int {
+ size := max - min + 1
+ if size <= 0 {
+ return []int{}
+ }
+ rng := make([]int, size)
+ for i := range rng {
+ rng[i] = min + i
+ }
+ return rng
+}
+
+func ToInt(a interface{}) int {
+ switch x := a.(type) {
+ case float32:
+ return int(x)
+ case float64:
+ return int(x)
+ case int:
+ return x
+ case int8:
+ return int(x)
+ case int16:
+ return int(x)
+ case int32:
+ return int(x)
+ case int64:
+ return int(x)
+ case uint:
+ return int(x)
+ case uint8:
+ return int(x)
+ case uint16:
+ return int(x)
+ case uint32:
+ return int(x)
+ case uint64:
+ return int(x)
+ case string:
+ i, err := strconv.Atoi(x)
+ if err != nil {
+ panic(fmt.Sprintf("invalid operation: int(%s)", x))
+ }
+ return i
+ default:
+ panic(fmt.Sprintf("invalid operation: int(%T)", x))
+ }
+}
+
+func ToInt64(a interface{}) int64 {
+ switch x := a.(type) {
+ case float32:
+ return int64(x)
+ case float64:
+ return int64(x)
+ case int:
+ return int64(x)
+ case int8:
+ return int64(x)
+ case int16:
+ return int64(x)
+ case int32:
+ return int64(x)
+ case int64:
+ return x
+ case uint:
+ return int64(x)
+ case uint8:
+ return int64(x)
+ case uint16:
+ return int64(x)
+ case uint32:
+ return int64(x)
+ case uint64:
+ return int64(x)
+ default:
+ panic(fmt.Sprintf("invalid operation: int64(%T)", x))
+ }
+}
+
+func ToFloat64(a interface{}) float64 {
+ switch x := a.(type) {
+ case float32:
+ return float64(x)
+ case float64:
+ return x
+ case int:
+ return float64(x)
+ case int8:
+ return float64(x)
+ case int16:
+ return float64(x)
+ case int32:
+ return float64(x)
+ case int64:
+ return float64(x)
+ case uint:
+ return float64(x)
+ case uint8:
+ return float64(x)
+ case uint16:
+ return float64(x)
+ case uint32:
+ return float64(x)
+ case uint64:
+ return float64(x)
+ case string:
+ f, err := strconv.ParseFloat(x, 64)
+ if err != nil {
+ panic(fmt.Sprintf("invalid operation: float(%s)", x))
+ }
+ return f
+ default:
+ panic(fmt.Sprintf("invalid operation: float(%T)", x))
+ }
+}
+
+func IsNil(v interface{}) bool {
+ if v == nil {
+ return true
+ }
+ r := reflect.ValueOf(v)
+ switch r.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+ return r.IsNil()
+ default:
+ return false
+ }
+}
+
+func Abs(x interface{}) interface{} {
+ switch x.(type) {
+ case float32:
+ if x.(float32) < 0 {
+ return -x.(float32)
+ } else {
+ return x
+ }
+ case float64:
+ if x.(float64) < 0 {
+ return -x.(float64)
+ } else {
+ return x
+ }
+ case int:
+ if x.(int) < 0 {
+ return -x.(int)
+ } else {
+ return x
+ }
+ case int8:
+ if x.(int8) < 0 {
+ return -x.(int8)
+ } else {
+ return x
+ }
+ case int16:
+ if x.(int16) < 0 {
+ return -x.(int16)
+ } else {
+ return x
+ }
+ case int32:
+ if x.(int32) < 0 {
+ return -x.(int32)
+ } else {
+ return x
+ }
+ case int64:
+ if x.(int64) < 0 {
+ return -x.(int64)
+ } else {
+ return x
+ }
+ case uint:
+ if x.(uint) < 0 {
+ return -x.(uint)
+ } else {
+ return x
+ }
+ case uint8:
+ if x.(uint8) < 0 {
+ return -x.(uint8)
+ } else {
+ return x
+ }
+ case uint16:
+ if x.(uint16) < 0 {
+ return -x.(uint16)
+ } else {
+ return x
+ }
+ case uint32:
+ if x.(uint32) < 0 {
+ return -x.(uint32)
+ } else {
+ return x
+ }
+ case uint64:
+ if x.(uint64) < 0 {
+ return -x.(uint64)
+ } else {
+ return x
+ }
+ }
+ panic(fmt.Sprintf("invalid argument for abs (type %T)", x))
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/vm.go b/vendor/github.com/antonmedv/expr/vm/vm.go
new file mode 100644
index 00000000000..af4fc5bf755
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/vm.go
@@ -0,0 +1,523 @@
+package vm
+
+//go:generate sh -c "go run ./func_types > ./generated.go"
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+var MemoryBudget int = 1e6
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+type Function = func(params ...interface{}) (interface{}, error)
+
+func Run(program *Program, env interface{}) (interface{}, error) {
+ if program == nil {
+ return nil, fmt.Errorf("program is nil")
+ }
+
+ vm := VM{}
+ return vm.Run(program, env)
+}
+
+type VM struct {
+ stack []interface{}
+ ip int
+ scopes []*Scope
+ debug bool
+ step chan struct{}
+ curr chan int
+ memory int
+ memoryBudget int
+}
+
+type Scope struct {
+ Array reflect.Value
+ It int
+ Len int
+ Count int
+}
+
+func Debug() *VM {
+ vm := &VM{
+ debug: true,
+ step: make(chan struct{}, 0),
+ curr: make(chan int, 0),
+ }
+ return vm
+}
+
+func (vm *VM) Run(program *Program, env interface{}) (_ interface{}, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ f := &file.Error{
+ Location: program.Locations[vm.ip-1],
+ Message: fmt.Sprintf("%v", r),
+ }
+ if err, ok := r.(error); ok {
+ f.Wrap(err)
+ }
+ err = f.Bind(program.Source)
+ }
+ }()
+
+ if vm.stack == nil {
+ vm.stack = make([]interface{}, 0, 2)
+ } else {
+ vm.stack = vm.stack[0:0]
+ }
+
+ if vm.scopes != nil {
+ vm.scopes = vm.scopes[0:0]
+ }
+
+ vm.memoryBudget = MemoryBudget
+ vm.memory = 0
+ vm.ip = 0
+
+ for vm.ip < len(program.Bytecode) {
+ if vm.debug {
+ <-vm.step
+ }
+
+ op := program.Bytecode[vm.ip]
+ arg := program.Arguments[vm.ip]
+ vm.ip += 1
+
+ switch op {
+
+ case OpPush:
+ vm.push(program.Constants[arg])
+
+ case OpPop:
+ vm.pop()
+
+ case OpLoadConst:
+ vm.push(runtime.Fetch(env, program.Constants[arg]))
+
+ case OpLoadField:
+ vm.push(runtime.FetchField(env, program.Constants[arg].(*runtime.Field)))
+
+ case OpLoadFast:
+ vm.push(env.(map[string]interface{})[program.Constants[arg].(string)])
+
+ case OpLoadMethod:
+ vm.push(runtime.FetchMethod(env, program.Constants[arg].(*runtime.Method)))
+
+ case OpLoadFunc:
+ vm.push(program.Functions[arg])
+
+ case OpFetch:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Fetch(a, b))
+
+ case OpFetchField:
+ a := vm.pop()
+ vm.push(runtime.FetchField(a, program.Constants[arg].(*runtime.Field)))
+
+ case OpMethod:
+ a := vm.pop()
+ vm.push(runtime.FetchMethod(a, program.Constants[arg].(*runtime.Method)))
+
+ case OpTrue:
+ vm.push(true)
+
+ case OpFalse:
+ vm.push(false)
+
+ case OpNil:
+ vm.push(nil)
+
+ case OpNegate:
+ v := runtime.Negate(vm.pop())
+ vm.push(v)
+
+ case OpNot:
+ v := vm.pop().(bool)
+ vm.push(!v)
+
+ case OpEqual:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Equal(a, b))
+
+ case OpEqualInt:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(a.(int) == b.(int))
+
+ case OpEqualString:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(a.(string) == b.(string))
+
+ case OpJump:
+ vm.ip += arg
+
+ case OpJumpIfTrue:
+ if vm.current().(bool) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfFalse:
+ if !vm.current().(bool) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfNil:
+ if runtime.IsNil(vm.current()) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfNotNil:
+ if !runtime.IsNil(vm.current()) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfEnd:
+ scope := vm.Scope()
+ if scope.It >= scope.Len {
+ vm.ip += arg
+ }
+
+ case OpJumpBackward:
+ vm.ip -= arg
+
+ case OpIn:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.In(a, b))
+
+ case OpLess:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Less(a, b))
+
+ case OpMore:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.More(a, b))
+
+ case OpLessOrEqual:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.LessOrEqual(a, b))
+
+ case OpMoreOrEqual:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.MoreOrEqual(a, b))
+
+ case OpAdd:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Add(a, b))
+
+ case OpSubtract:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Subtract(a, b))
+
+ case OpMultiply:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Multiply(a, b))
+
+ case OpDivide:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Divide(a, b))
+
+ case OpModulo:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Modulo(a, b))
+
+ case OpExponent:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Exponent(a, b))
+
+ case OpRange:
+ b := vm.pop()
+ a := vm.pop()
+ min := runtime.ToInt(a)
+ max := runtime.ToInt(b)
+ size := max - min + 1
+ if vm.memory+size >= vm.memoryBudget {
+ panic("memory budget exceeded")
+ }
+ vm.push(runtime.MakeRange(min, max))
+ vm.memory += size
+
+ case OpMatches:
+ b := vm.pop()
+ a := vm.pop()
+ match, err := regexp.MatchString(b.(string), a.(string))
+ if err != nil {
+ panic(err)
+ }
+
+ vm.push(match)
+
+ case OpMatchesConst:
+ a := vm.pop()
+ r := program.Constants[arg].(*regexp.Regexp)
+ vm.push(r.MatchString(a.(string)))
+
+ case OpContains:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(strings.Contains(a.(string), b.(string)))
+
+ case OpStartsWith:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(strings.HasPrefix(a.(string), b.(string)))
+
+ case OpEndsWith:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(strings.HasSuffix(a.(string), b.(string)))
+
+ case OpSlice:
+ from := vm.pop()
+ to := vm.pop()
+ node := vm.pop()
+ vm.push(runtime.Slice(node, from, to))
+
+ case OpCall:
+ fn := reflect.ValueOf(vm.pop())
+ size := arg
+ in := make([]reflect.Value, size)
+ for i := int(size) - 1; i >= 0; i-- {
+ param := vm.pop()
+ if param == nil && reflect.TypeOf(param) == nil {
+ // In case of nil value and nil type use this hack,
+ // otherwise reflect.Call will panic on zero value.
+ in[i] = reflect.ValueOf(¶m).Elem()
+ } else {
+ in[i] = reflect.ValueOf(param)
+ }
+ }
+ out := fn.Call(in)
+ if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() {
+ panic(out[1].Interface().(error))
+ }
+ vm.push(out[0].Interface())
+
+ case OpCall0:
+ out, err := program.Functions[arg]()
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCall1:
+ a := vm.pop()
+ out, err := program.Functions[arg](a)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCall2:
+ b := vm.pop()
+ a := vm.pop()
+ out, err := program.Functions[arg](a, b)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCall3:
+ c := vm.pop()
+ b := vm.pop()
+ a := vm.pop()
+ out, err := program.Functions[arg](a, b, c)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCallN:
+ fn := vm.pop().(Function)
+ size := arg
+ in := make([]interface{}, size)
+ for i := int(size) - 1; i >= 0; i-- {
+ in[i] = vm.pop()
+ }
+ out, err := fn(in...)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCallFast:
+ fn := vm.pop().(func(...interface{}) interface{})
+ size := arg
+ in := make([]interface{}, size)
+ for i := int(size) - 1; i >= 0; i-- {
+ in[i] = vm.pop()
+ }
+ vm.push(fn(in...))
+
+ case OpCallTyped:
+ fn := vm.pop()
+ out := vm.call(fn, arg)
+ vm.push(out)
+
+ case OpArray:
+ size := vm.pop().(int)
+ array := make([]interface{}, size)
+ for i := size - 1; i >= 0; i-- {
+ array[i] = vm.pop()
+ }
+ vm.push(array)
+ vm.memory += size
+ if vm.memory >= vm.memoryBudget {
+ panic("memory budget exceeded")
+ }
+
+ case OpMap:
+ size := vm.pop().(int)
+ m := make(map[string]interface{})
+ for i := size - 1; i >= 0; i-- {
+ value := vm.pop()
+ key := vm.pop()
+ m[key.(string)] = value
+ }
+ vm.push(m)
+ vm.memory += size
+ if vm.memory >= vm.memoryBudget {
+ panic("memory budget exceeded")
+ }
+
+ case OpLen:
+ vm.push(runtime.Len(vm.current()))
+
+ case OpCast:
+ t := arg
+ switch t {
+ case 0:
+ vm.push(runtime.ToInt(vm.pop()))
+ case 1:
+ vm.push(runtime.ToInt64(vm.pop()))
+ case 2:
+ vm.push(runtime.ToFloat64(vm.pop()))
+ }
+
+ case OpDeref:
+ a := vm.pop()
+ vm.push(runtime.Deref(a))
+
+ case OpIncrementIt:
+ scope := vm.Scope()
+ scope.It++
+
+ case OpIncrementCount:
+ scope := vm.Scope()
+ scope.Count++
+
+ case OpGetCount:
+ scope := vm.Scope()
+ vm.push(scope.Count)
+
+ case OpGetLen:
+ scope := vm.Scope()
+ vm.push(scope.Len)
+
+ case OpPointer:
+ scope := vm.Scope()
+ vm.push(scope.Array.Index(scope.It).Interface())
+
+ case OpBegin:
+ a := vm.pop()
+ array := reflect.ValueOf(a)
+ vm.scopes = append(vm.scopes, &Scope{
+ Array: array,
+ Len: array.Len(),
+ })
+
+ case OpEnd:
+ vm.scopes = vm.scopes[:len(vm.scopes)-1]
+
+ case OpBuiltin:
+ switch arg {
+ case builtin.Len:
+ vm.push(runtime.Len(vm.pop()))
+
+ case builtin.Abs:
+ vm.push(runtime.Abs(vm.pop()))
+
+ case builtin.Int:
+ vm.push(runtime.ToInt(vm.pop()))
+
+ case builtin.Float:
+ vm.push(runtime.ToFloat64(vm.pop()))
+
+ default:
+ panic(fmt.Sprintf("unknown builtin %v", arg))
+ }
+
+ default:
+ panic(fmt.Sprintf("unknown bytecode %#x", op))
+ }
+
+ if vm.debug {
+ vm.curr <- vm.ip
+ }
+ }
+
+ if vm.debug {
+ close(vm.curr)
+ close(vm.step)
+ }
+
+ if len(vm.stack) > 0 {
+ return vm.pop(), nil
+ }
+
+ return nil, nil
+}
+
+func (vm *VM) push(value interface{}) {
+ vm.stack = append(vm.stack, value)
+}
+
+func (vm *VM) current() interface{} {
+ return vm.stack[len(vm.stack)-1]
+}
+
+func (vm *VM) pop() interface{} {
+ value := vm.stack[len(vm.stack)-1]
+ vm.stack = vm.stack[:len(vm.stack)-1]
+ return value
+}
+
+func (vm *VM) Stack() []interface{} {
+ return vm.stack
+}
+
+func (vm *VM) Scope() *Scope {
+ if len(vm.scopes) > 0 {
+ return vm.scopes[len(vm.scopes)-1]
+ }
+ return nil
+}
+
+func (vm *VM) Step() {
+ vm.step <- struct{}{}
+}
+
+func (vm *VM) Position() chan int {
+ return vm.curr
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b9111e9db19..62503ed2091 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -245,6 +245,20 @@ github.com/andybalholm/brotli
# github.com/antlr/antlr4/runtime/Go/antlr v1.4.10
## explicit; go 1.16
github.com/antlr/antlr4/runtime/Go/antlr
+# github.com/antonmedv/expr v1.12.5
+## explicit; go 1.13
+github.com/antonmedv/expr
+github.com/antonmedv/expr/ast
+github.com/antonmedv/expr/builtin
+github.com/antonmedv/expr/checker
+github.com/antonmedv/expr/compiler
+github.com/antonmedv/expr/conf
+github.com/antonmedv/expr/file
+github.com/antonmedv/expr/optimizer
+github.com/antonmedv/expr/parser
+github.com/antonmedv/expr/parser/lexer
+github.com/antonmedv/expr/vm
+github.com/antonmedv/expr/vm/runtime
# github.com/apapsch/go-jsonmerge/v2 v2.0.0
## explicit; go 1.12
github.com/apapsch/go-jsonmerge/v2