diff --git a/.gitignore b/.gitignore index 1524d7f67b0..46398f306c8 100644 --- a/.gitignore +++ b/.gitignore @@ -66,3 +66,6 @@ docs/pipeline-api.md.backup # Ignore generated kind.yaml kind.yaml + +# Profiling +cpu.prof \ No newline at end of file diff --git a/config/resolvers/bundleresolver-config.yaml b/config/resolvers/bundleresolver-config.yaml index d48372ddd11..e2749b8887a 100644 --- a/config/resolvers/bundleresolver-config.yaml +++ b/config/resolvers/bundleresolver-config.yaml @@ -26,3 +26,8 @@ data: default-service-account: "default" # The default layer kind in the bundle image. default-kind: "task" + # Optional: Default cache mode for this resolver. Valid values: "always", "never", "auto" (default: "auto") + # "always" - Always cache resolved resources + # "never" - Never cache resolved resources + # "auto" - Only cache bundles with digest references (@sha256:...) + # default-cache-mode: "auto" diff --git a/config/resolvers/cluster-resolver-config.yaml b/config/resolvers/cluster-resolver-config.yaml index 8f2e775abe2..2b7f17a6070 100644 --- a/config/resolvers/cluster-resolver-config.yaml +++ b/config/resolvers/cluster-resolver-config.yaml @@ -30,3 +30,8 @@ data: allowed-namespaces: "" # An optional comma-separated list of namespaces which the resolver is blocked from accessing. Defaults to empty, meaning all namespaces are allowed. blocked-namespaces: "" + # Optional: Default cache mode for this resolver. 
Valid values: "always", "never", "auto" (default: "auto") + # "always" - Always cache resolved resources + # "never" - Never cache resolved resources (recommended for cluster resolver since resources are mutable) + # "auto" - Never cache for cluster resolver (same as "never") + # default-cache-mode: "auto" diff --git a/config/resolvers/git-resolver-config.yaml b/config/resolvers/git-resolver-config.yaml index 565d2837cab..bc33b0aee67 100644 --- a/config/resolvers/git-resolver-config.yaml +++ b/config/resolvers/git-resolver-config.yaml @@ -41,3 +41,8 @@ data: # The default organization to look for repositories under when using the authenticated API, # if not specified in the resolver parameters. Optional. default-org: "" + # Optional: Default cache mode for this resolver. Valid values: "always", "never", "auto" (default: "auto") + # "always" - Always cache resolved resources + # "never" - Never cache resolved resources + # "auto" - Only cache when revision is a commit hash + # default-cache-mode: "auto" diff --git a/config/resolvers/resolver-cache-config.yaml b/config/resolvers/resolver-cache-config.yaml new file mode 100644 index 00000000000..4266df46583 --- /dev/null +++ b/config/resolvers/resolver-cache-config.yaml @@ -0,0 +1,28 @@ +# Copyright 2025 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: resolver-cache-config + namespace: tekton-pipelines-resolvers + labels: + app.kubernetes.io/component: resolvers + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # Maximum number of entries in the resolver cache + max-size: "1000" + # Time-to-live for cache entries (examples: 5m, 10m, 1h) + ttl: "5m" diff --git a/docs/bundle-resolver.md b/docs/bundle-resolver.md index a3321fab634..4ff75462d09 100644 --- a/docs/bundle-resolver.md +++ b/docs/bundle-resolver.md @@ -19,6 +19,7 @@ This Resolver responds to type `bundles`. | `bundle` | The bundle url pointing at the image to fetch | `gcr.io/tekton-releases/catalog/upstream/golang-build:0.1` | | `name` | The name of the resource to pull out of the bundle | `golang-build` | | `kind` | The resource kind to pull out of the bundle | `task` | +| `cache` | Controls caching behavior for the resolved resource | `always`, `never`, `auto` | ## Requirements @@ -45,6 +46,44 @@ for the name, namespace and defaults that the resolver ships with. | `backoff-cap` | The maxumum backoff duration. If reached, remaining steps are zeroed.| `10s`, `20s` | | `default-kind` | The default layer kind in the bundle image. | `task`, `pipeline` | +### Caching Options + +The bundle resolver supports caching of resolved resources to improve performance. The caching behavior can be configured using the `cache` option: + +| Cache Value | Description | +|-------------|-------------| +| `always` | Always cache resolved resources. This is the most aggressive caching strategy and will cache all resolved resources regardless of their source. | +| `never` | Never cache resolved resources. This disables caching completely. | +| `auto` | Caching will only occur for bundles pulled by digest. (default) | + +### Cache Configuration + +The resolver cache can be configured globally using the `resolver-cache-config` ConfigMap. 
This ConfigMap controls the cache size and TTL (time-to-live) for all resolvers. + +| Option Name | Description | Default Value | Example Values | +|-------------|-------------|---------------|----------------| +| `max-size` | Maximum number of entries in the cache | `1000` | `500`, `2000` | +| `ttl` | Time-to-live for cache entries | `5m` | `10m`, `1h` | + +The ConfigMap name can be customized using the `RESOLVER_CACHE_CONFIG_MAP_NAME` environment variable. If not set, it defaults to `resolver-cache-config`. + +Additionally, you can set a default cache mode for the bundle resolver by adding the `default-cache-mode` option to the `bundleresolver-config` ConfigMap. This overrides the system default (`auto`) for this resolver: + +| Option Name | Description | Valid Values | Default | +|-------------|-------------|--------------|---------| +| `default-cache-mode` | Default caching behavior when `cache` parameter is not specified | `always`, `never`, `auto` | `auto` | + +Example: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: bundleresolver-config + namespace: tekton-pipelines-resolvers +data: + default-cache-mode: "always" # Always cache unless task/pipeline specifies otherwise +``` + ## Usage ### Task Resolution diff --git a/docs/cluster-resolver.md b/docs/cluster-resolver.md index 1a43f579449..be9ce2a7410 100644 --- a/docs/cluster-resolver.md +++ b/docs/cluster-resolver.md @@ -18,6 +18,48 @@ This Resolver responds to type `cluster`. | `kind` | The kind of resource to fetch. | `task`, `pipeline`, `stepaction` | | `name` | The name of the resource to fetch. | `some-pipeline`, `some-task` | | `namespace` | The namespace in the cluster containing the resource. | `default`, `other-namespace` | +| `cache` | Optional cache mode for the resolver. 
| `always`, `never`, `auto` | + +### Cache Parameter + +The `cache` parameter controls whether the cluster resolver caches resolved resources: + +| Cache Mode | Description | +|------------|-------------| +| `always` | Always cache the resolved resource, regardless of whether it has an immutable reference. | +| `never` | Never cache the resolved resource. | +| `auto` | **Cluster resolver behavior**: Never cache (cluster resources lack immutable references). | +| (not specified) | **Default behavior**: Never cache (same as `auto` for cluster resolver). | + +**Note**: The cluster resolver only caches when `cache: always` is explicitly specified. This is because cluster resources (Tasks, Pipelines, etc.) do not have immutable references like Git commit hashes or bundle digests, making automatic caching unreliable. + +### Cache Configuration + +The resolver cache can be configured globally using the `resolver-cache-config` ConfigMap. This ConfigMap controls the cache size and TTL (time-to-live) for all resolvers. + +| Option Name | Description | Default Value | Example Values | +|-------------|-------------|---------------|----------------| +| `max-size` | Maximum number of entries in the cache | `1000` | `500`, `2000` | +| `ttl` | Time-to-live for cache entries | `5m` | `10m`, `1h` | + +The ConfigMap name can be customized using the `RESOLVER_CACHE_CONFIG_MAP_NAME` environment variable. If not set, it defaults to `resolver-cache-config`. + +Additionally, you can set a default cache mode for the cluster resolver by adding the `default-cache-mode` option to the `cluster-resolver-config` ConfigMap. 
This overrides the system default (`auto`) for this resolver: + +| Option Name | Description | Valid Values | Default | +|-------------|-------------|--------------|---------| +| `default-cache-mode` | Default caching behavior when `cache` parameter is not specified | `always`, `never`, `auto` | `auto` | + +Example: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-resolver-config + namespace: tekton-pipelines-resolvers +data: + default-cache-mode: "never" # Never cache by default (since cluster resources are mutable) +``` ## Requirements @@ -63,6 +105,48 @@ spec: value: namespace-containing-task ``` +### Task Resolution with Caching + +```yaml +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + name: remote-task-reference-cached +spec: + taskRef: + resolver: cluster + params: + - name: kind + value: task + - name: name + value: some-task + - name: namespace + value: namespace-containing-task + - name: cache + value: always +``` + +### Task Resolution without Caching + +```yaml +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + name: remote-task-reference-no-cache +spec: + taskRef: + resolver: cluster + params: + - name: kind + value: task + - name: name + value: some-task + - name: namespace + value: namespace-containing-task + - name: cache + value: never +``` + ### StepAction Resolution ```yaml diff --git a/docs/git-resolver.md b/docs/git-resolver.md index 85030ac2605..2ef0e74cbad 100644 --- a/docs/git-resolver.md +++ b/docs/git-resolver.md @@ -26,6 +26,7 @@ This Resolver responds to type `git`. | `pathInRepo` | Where to find the file in the repo. 
| `task/golang-build/0.3/golang-build.yaml` | | `serverURL` | An optional server URL (that includes the https:// prefix) to connect for API operations | `https:/github.mycompany.com` | | `scmType` | An optional SCM type to use for API operations | `github`, `gitlab`, `gitea` | +| `cache` | Controls caching behavior for the resolved resource | `always`, `never`, `auto` | ## Requirements @@ -55,6 +56,44 @@ for the name, namespace and defaults that the resolver ships with. | `api-token-secret-namespace` | The namespace containing the token secret, if not `default`. | `other-namespace` | | `default-org` | The default organization to look for repositories under when using the authenticated API, if not specified in the resolver parameters. Optional. | `tektoncd`, `kubernetes` | +### Caching Options + +The git resolver supports caching of resolved resources to improve performance. The caching behavior can be configured using the `cache` option: + +| Cache Value | Description | +|-------------|-------------| +| `always` | Always cache resolved resources. This is the most aggressive caching strategy and will cache all resolved resources regardless of their source. | +| `never` | Never cache resolved resources. This disables caching completely. | +| `auto` | Caching will only occur when revision is a commit hash. (default) | + +### Cache Configuration + +The resolver cache can be configured globally using the `resolver-cache-config` ConfigMap. This ConfigMap controls the cache size and TTL (time-to-live) for all resolvers. + +| Option Name | Description | Default Value | Example Values | +|-------------|-------------|---------------|----------------| +| `max-size` | Maximum number of entries in the cache | `1000` | `500`, `2000` | +| `ttl` | Time-to-live for cache entries | `5m` | `10m`, `1h` | + +The ConfigMap name can be customized using the `RESOLVER_CACHE_CONFIG_MAP_NAME` environment variable. If not set, it defaults to `resolver-cache-config`. 
+ +Additionally, you can set a default cache mode for the git resolver by adding the `default-cache-mode` option to the `git-resolver-config` ConfigMap. This overrides the system default (`auto`) for this resolver: + +| Option Name | Description | Valid Values | Default | +|-------------|-------------|--------------|---------| +| `default-cache-mode` | Default caching behavior when `cache` parameter is not specified | `always`, `never`, `auto` | `auto` | + +Example: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: git-resolver-config + namespace: tekton-pipelines-resolvers +data: + default-cache-mode: "always" # Always cache unless task/pipeline specifies otherwise +``` + ## Usage The `git` resolver has two modes: cloning a repository with `git clone` (with diff --git a/docs/hub-resolver.md b/docs/hub-resolver.md index aee168dc870..c34209c0aec 100644 --- a/docs/hub-resolver.md +++ b/docs/hub-resolver.md @@ -45,7 +45,6 @@ for the name, namespace and defaults that the resolver ships with. | `default-kind` | The default object kind for references. | `task`, `pipeline` | | `default-type` | The default hub from where to pull the resource. | `artifact`, `tekton` | - ### Configuring the Hub API endpoint The Hub Resolver supports to resolve resources from the [Artifact Hub](https://artifacthub.io/) and the [Tekton Hub](https://hub.tekton.dev/), diff --git a/docs/resolution.md b/docs/resolution.md index 6a56470aa88..474c77faefc 100644 --- a/docs/resolution.md +++ b/docs/resolution.md @@ -34,6 +34,18 @@ accompanying [resolver-template](./resolver-template). For a table of the interfaces and methods a resolver must implement along with those that are optional, see [resolver-reference.md](./resolver-reference.md). +## Resolver Cache Configuration + +The resolver cache is used to improve performance by caching resolved resources for bundle and git resolver. 
By default, the cache uses: +- 5 minutes ("5m") as the time-to-live (TTL) for cache entries +- 1000 entries as the maximum cache size + +You can override these defaults by editing the `resolver-cache-config.yaml` ConfigMap in the `tekton-pipelines-resolvers` namespace. Set the following keys: +- `max-size`: Set the maximum number of cache entries (e.g., "500") +- `ttl`: Set the TTL for cache entries (e.g., "10m", "30s") + +If these values are missing or invalid, the defaults will be used. + --- Except as otherwise noted, the content of this page is licensed under the diff --git a/pkg/remoteresolution/remote/resolution/resolver.go b/pkg/remoteresolution/remote/resolution/resolver.go index d3500ae6396..67779b6fa6c 100644 --- a/pkg/remoteresolution/remote/resolution/resolver.go +++ b/pkg/remoteresolution/remote/resolution/resolver.go @@ -27,6 +27,8 @@ import ( "knative.dev/pkg/kmeta" ) +var _ remote.Resolver = (*Resolver)(nil) + // Resolver implements remote.Resolver and encapsulates the majority of // code required to interface with the tektoncd/resolution project. It // is used to make async requests for resources like pipelines from @@ -38,8 +40,6 @@ type Resolver struct { resolverPayload remoteresource.ResolverPayload } -var _ remote.Resolver = &Resolver{} - // NewResolver returns an implementation of remote.Resolver capable // of performing asynchronous remote resolution.
func NewResolver(requester remoteresource.Requester, owner kmeta.OwnerRefable, resolverName string, resolverPayload remoteresource.ResolverPayload) remote.Resolver { diff --git a/pkg/remoteresolution/resolver/bundle/resolver.go b/pkg/remoteresolution/resolver/bundle/resolver.go index 4f8612931a0..2a85fb34203 100644 --- a/pkg/remoteresolution/resolver/bundle/resolver.go +++ b/pkg/remoteresolution/resolver/bundle/resolver.go @@ -1,17 +1,17 @@ /* - Copyright 2024 The Tekton Authors +Copyright 2024 The Tekton Authors - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ package bundle @@ -19,69 +19,106 @@ package bundle import ( "context" "errors" + "strings" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework" - "github.com/tektoncd/pipeline/pkg/resolution/common" - "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle" + "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" + resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" + bundleresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle" resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" "k8s.io/client-go/kubernetes" - "knative.dev/pkg/client/injection/kube/client" + kubeclient "knative.dev/pkg/client/injection/kube/client" ) const ( // LabelValueBundleResolverType is the value to use for the // resolution.tekton.dev/type label on resource requests - LabelValueBundleResolverType string = "bundles" + LabelValueBundleResolverType = "bundles" // BundleResolverName is the name that the bundle resolver should be associated with. BundleResolverName = "bundleresolver" ) -var _ framework.Resolver = &Resolver{} +var _ framework.Resolver = (*Resolver)(nil) +var _ resolutionframework.ConfigWatcher = (*Resolver)(nil) +var _ cache.ImmutabilityChecker = (*Resolver)(nil) // Resolver implements a framework.Resolver that can fetch files from OCI bundles. type Resolver struct { - kubeClientSet kubernetes.Interface + kubeClientSet kubernetes.Interface + resolveRequestFunc func(context.Context, kubernetes.Interface, *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) } // Initialize sets up any dependencies needed by the Resolver. None atm. 
func (r *Resolver) Initialize(ctx context.Context) error { - r.kubeClientSet = client.Get(ctx) + r.kubeClientSet = kubeclient.Get(ctx) + if r.resolveRequestFunc == nil { + r.resolveRequestFunc = bundleresolution.ResolveRequest + } return nil } // GetName returns a string name to refer to this Resolver by. -func (r *Resolver) GetName(context.Context) string { +func (r *Resolver) GetName(_ context.Context) string { return BundleResolverName } -// GetConfigName returns the name of the bundle resolver's configmap. -func (r *Resolver) GetConfigName(context.Context) string { - return bundle.ConfigMapName -} - -// GetSelector returns a map of labels to match requests to this Resolver. -func (r *Resolver) GetSelector(context.Context) map[string]string { +// GetSelector returns a map of labels to match against tasks requesting +// resolution from this Resolver. +func (r *Resolver) GetSelector(_ context.Context) map[string]string { return map[string]string{ - common.LabelKeyResolverType: LabelValueBundleResolverType, + resolutioncommon.LabelKeyResolverType: LabelValueBundleResolverType, } } -// Validate ensures reqolution request spec from a request are as expected. +// GetConfigName returns the name of the bundle resolver's configmap. +func (r *Resolver) GetConfigName(_ context.Context) string { + return bundleresolution.ConfigMapName +} + +// Validate ensures parameters from a request are as expected. func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error { - if len(req.Params) > 0 { - return bundle.ValidateParams(ctx, req.Params) + return bundleresolution.ValidateParams(ctx, req.Params) +} + +// IsImmutable implements ImmutabilityChecker.IsImmutable +// Returns true if the bundle parameter contains a digest reference (@sha256:...) 
+func (r *Resolver) IsImmutable(params []v1.Param) bool { + var bundleRef string + for _, param := range params { + if param.Name == bundleresolution.ParamBundle { + bundleRef = param.Value.StringVal + break + } } - // Remove this error once validate url has been implemented. - return errors.New("cannot validate request. the Validate method has not been implemented.") + + // Checks if the given string looks like an OCI pull spec by digest. + // Only SHA-256 digests are considered immutable. + // Examples: + // - image@sha256:abc123... + // - registry.io/image@sha256:abc123... + // - registry.io/image:tag@sha256:abc123... (tag is ignored when digest is present) + return strings.Contains(bundleRef, "@sha256:") } -// Resolve uses the given request spec resolve the requested file or resource. +// Resolve uses the given params to resolve the requested file or resource. func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { - if len(req.Params) > 0 { - return bundle.ResolveRequest(ctx, r.kubeClientSet, req) + if len(req.Params) == 0 { + return nil, errors.New("no params") } - // Remove this error once resolution of url has been implemented. 
- return nil, errors.New("the Resolve method has not been implemented.") + + if cache.ShouldUse(ctx, r, req.Params, LabelValueBundleResolverType) { + return cache.GetFromCacheOrResolve( + ctx, + req.Params, + LabelValueBundleResolverType, + func() (resolutionframework.ResolvedResource, error) { + return r.resolveRequestFunc(ctx, r.kubeClientSet, req) + }, + ) + } + + return r.resolveRequestFunc(ctx, r.kubeClientSet, req) } diff --git a/pkg/remoteresolution/resolver/bundle/resolver_test.go b/pkg/remoteresolution/resolver/bundle/resolver_test.go index 3d9cd994feb..7e178e95e80 100644 --- a/pkg/remoteresolution/resolver/bundle/resolver_test.go +++ b/pkg/remoteresolution/resolver/bundle/resolver_test.go @@ -66,6 +66,65 @@ func TestGetSelector(t *testing.T) { } } +func TestIsImmutable(t *testing.T) { + testCases := []struct { + name string + bundleRef string + expected bool + }{ + { + name: "digest with sha256", + bundleRef: "gcr.io/tekton-releases/catalog/upstream/golang-build@sha256:abc123def456", + expected: true, + }, + { + name: "digest with sha512 (not supported)", + bundleRef: "myregistry.io/myimage@sha512:1234567890abcdef", + expected: false, + }, + { + name: "digest with sha384 (not supported)", + bundleRef: "docker.io/library/ubuntu@sha384:fedcba098765", + expected: false, + }, + { + name: "tag with digest", + bundleRef: "gcr.io/myproject/myimage:v1.0.0@sha256:abc123", + expected: true, + }, + { + name: "only tag without digest", + bundleRef: "gcr.io/myproject/myimage:latest", + expected: false, + }, + { + name: "no tag or digest", + bundleRef: "gcr.io/myproject/myimage", + expected: false, + }, + { + name: "tag with colon but no digest", + bundleRef: "myregistry.io:8080/myimage:v2.0", + expected: false, + }, + } + + resolver := &bundle.Resolver{} + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + params := []pipelinev1.Param{{ + Name: bundleresolution.ParamBundle, + Value: *pipelinev1.NewStructuredValues(tc.bundleRef), + }} + + result := 
resolver.IsImmutable(params) + if result != tc.expected { + t.Errorf("IsImmutable(%s) = %v, want %v", tc.bundleRef, result, tc.expected) + } + }) + } +} + func TestValidateParamsSecret(t *testing.T) { resolver := bundle.Resolver{} config := map[string]string{ @@ -222,7 +281,15 @@ func TestValidateMissing(t *testing.T) { } func TestResolveDisabled(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + resolver := bundle.Resolver{} + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + // Now overlay the disabled context on top + ctx = resolverDisabledContext() var err error @@ -240,7 +307,7 @@ func TestResolveDisabled(t *testing.T) { Value: *pipelinev1.NewStructuredValues("baz"), }} req := v1beta1.ResolutionRequestSpec{Params: params} - _, err = resolver.Resolve(resolverDisabledContext(), &req) + _, err = resolver.Resolve(ctx, &req) if err == nil { t.Fatalf("expected disabled err") } @@ -273,6 +340,12 @@ func TestResolve_KeyChainError(t *testing.T) { bundleresolution.ConfigKind: "task", bundleresolution.ConfigServiceAccount: "default", }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: resolverconfig.ResolversNamespace(system.Namespace()), + }, + Data: map[string]string{}, }}, } diff --git a/pkg/remoteresolution/resolver/cluster/resolver.go b/pkg/remoteresolution/resolver/cluster/resolver.go index c08f8a18bd3..52bb6e8ff9a 100644 --- a/pkg/remoteresolution/resolver/cluster/resolver.go +++ b/pkg/remoteresolution/resolver/cluster/resolver.go @@ -18,79 +18,86 @@ package cluster import ( "context" - "errors" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" - clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client" 
"github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework" + "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" - "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster" + clusterresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster" resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" ) const ( // LabelValueClusterResolverType is the value to use for the // resolution.tekton.dev/type label on resource requests - LabelValueClusterResolverType string = "cluster" + LabelValueClusterResolverType = "cluster" // ClusterResolverName is the name that the cluster resolver should be // associated with - ClusterResolverName string = "Cluster" - - configMapName = "cluster-resolver-config" + ClusterResolverName = "Cluster" ) -var _ framework.Resolver = &Resolver{} +var _ framework.Resolver = (*Resolver)(nil) +var _ resolutionframework.ConfigWatcher = (*Resolver)(nil) +var _ cache.ImmutabilityChecker = (*Resolver)(nil) -// ResolverV2 implements a framework.Resolver that can fetch resources from other namespaces. +// Resolver implements a framework.Resolver that can fetch resources from the same cluster. type Resolver struct { - pipelineClientSet clientset.Interface + pipelineClientSet versioned.Interface } -// Initialize performs any setup required by the cluster resolver. +// Initialize sets up any dependencies needed by the Resolver. func (r *Resolver) Initialize(ctx context.Context) error { r.pipelineClientSet = pipelineclient.Get(ctx) return nil } -// GetName returns the string name that the cluster resolver should be -// associated with. +// GetName returns a string name to refer to this Resolver by. func (r *Resolver) GetName(_ context.Context) string { return ClusterResolverName } -// GetSelector returns the labels that resource requests are required to have for -// the cluster resolver to process them. 
+// GetSelector returns a map of labels to match against tasks requesting +// resolution from this Resolver. func (r *Resolver) GetSelector(_ context.Context) map[string]string { return map[string]string{ resolutioncommon.LabelKeyResolverType: LabelValueClusterResolverType, } } -// Validate returns an error if the given parameter map is not -// valid for a resource request targeting the cluster resolver. +// GetConfigName returns the name of the cluster resolver's configmap. +func (r *Resolver) GetConfigName(_ context.Context) string { + return clusterresolution.ConfigMapName +} + +// Validate ensures parameters from a request are as expected. func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error { - if len(req.Params) > 0 { - return cluster.ValidateParams(ctx, req.Params) - } - // Remove this error once validate url has been implemented. - return errors.New("cannot validate request. the Validate method has not been implemented.") + return clusterresolution.ValidateParams(ctx, req.Params) } -// Resolve performs the work of fetching a resource from a namespace with the given -// resolution spec. -func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { - if len(req.Params) > 0 { - return cluster.ResolveFromParams(ctx, req.Params, r.pipelineClientSet) - } - // Remove this error once resolution of url has been implemented. - return nil, errors.New("the Resolve method has not been implemented.") +// IsImmutable implements ImmutabilityChecker.IsImmutable +// Returns false because cluster resources don't have immutable references +func (r *Resolver) IsImmutable([]v1.Param) bool { + // Cluster resources (Tasks, Pipelines, etc.) 
don't have immutable references + // like Git commit hashes or bundle digests, so we always return false + return false } -var _ resolutionframework.ConfigWatcher = &Resolver{} +// Resolve uses the given params to resolve the requested file or resource. +func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { + if cache.ShouldUse(ctx, r, req.Params, LabelValueClusterResolverType) { + return cache.GetFromCacheOrResolve( + ctx, + req.Params, + LabelValueClusterResolverType, + func() (resolutionframework.ResolvedResource, error) { + return clusterresolution.ResolveFromParams(ctx, req.Params, r.pipelineClientSet) + }, + ) + } -// GetConfigName returns the name of the cluster resolver's configmap. -func (r *Resolver) GetConfigName(context.Context) string { - return configMapName + return clusterresolution.ResolveFromParams(ctx, req.Params, r.pipelineClientSet) } diff --git a/pkg/remoteresolution/resolver/cluster/resolver_test.go b/pkg/remoteresolution/resolver/cluster/resolver_test.go index 8f29197054b..38e57951322 100644 --- a/pkg/remoteresolution/resolver/cluster/resolver_test.go +++ b/pkg/remoteresolution/resolver/cluster/resolver_test.go @@ -22,6 +22,7 @@ import ( "encoding/base64" "encoding/hex" "errors" + "strings" "testing" "time" @@ -33,6 +34,7 @@ import ( "github.com/tektoncd/pipeline/pkg/internal/resolution" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" cluster "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster" + "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" frtesting "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/testing" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" clusterresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster" @@ -43,6 +45,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" duckv1 
"knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/system" _ "knative.dev/pkg/system/testing" "sigs.k8s.io/yaml" @@ -88,7 +91,7 @@ func TestValidate(t *testing.T) { } func TestValidateNotEnabled(t *testing.T) { - resolver := cluster.Resolver{} + resolver := &cluster.Resolver{} var err error @@ -102,13 +105,27 @@ func TestValidateNotEnabled(t *testing.T) { Name: clusterresolution.NameParam, Value: *pipelinev1.NewStructuredValues("baz"), }} + + ctx := resolverDisabledContext() req := v1beta1.ResolutionRequestSpec{Params: params} - err = resolver.Validate(resolverDisabledContext(), &req) + if err = resolver.Validate(ctx, &req); err == nil { + t.Fatalf("expected error, got nil") + } else if err.Error() != disabledError { + t.Fatalf("expected error %q, got %q", disabledError, err.Error()) + } +} + +func TestValidateWithNoParams(t *testing.T) { + resolver := &cluster.Resolver{} + + // Test Validate with no parameters - should get validation error about missing params + req := v1beta1.ResolutionRequestSpec{Params: []pipelinev1.Param{}} + err := resolver.Validate(t.Context(), &req) if err == nil { - t.Fatalf("expected disabled err") + t.Fatalf("expected error when no params provided, got nil") } - if d := cmp.Diff(disabledError, err.Error()); d != "" { - t.Errorf("unexpected error: %s", diff.PrintWantGot(d)) + if !strings.Contains(err.Error(), "missing required cluster resolver params") { + t.Fatalf("expected validation error about missing params, got %q", err.Error()) } } @@ -223,6 +240,10 @@ func TestValidateFailure(t *testing.T) { } } +// TestResolve tests the cluster resolver's resolve functionality. +// All test cases in this function implicitly cover cache misses, as each test case is the +// first resolution attempt for its respective resource, resulting in a cache miss and +// subsequent fetch from the cluster. The cache is then populated for future requests. 
func TestResolve(t *testing.T) { defaultNS := "pipeline-ns" @@ -428,6 +449,12 @@ func TestResolve(t *testing.T) { Data: map[string]string{ "enable-cluster-resolver": "true", }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: resolverconfig.ResolversNamespace(system.Namespace()), + }, + Data: map[string]string{}, }}, Pipelines: []*pipelinev1.Pipeline{examplePipeline}, ResolutionRequests: []*v1beta1.ResolutionRequest{request}, @@ -505,3 +532,137 @@ func createRequest(kind, name, namespace string) *v1beta1.ResolutionRequest { func resolverDisabledContext() context.Context { return frameworktesting.ContextWithClusterResolverDisabled(context.Background()) } + +func TestResolveWithDisabledResolver(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + ctx = frameworktesting.ContextWithClusterResolverDisabled(ctx) + resolver := &cluster.Resolver{} + + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + req := &v1beta1.ResolutionRequestSpec{ + Params: []pipelinev1.Param{ + {Name: "kind", Value: *pipelinev1.NewStructuredValues("task")}, + {Name: "name", Value: *pipelinev1.NewStructuredValues("test-task")}, + {Name: "namespace", Value: *pipelinev1.NewStructuredValues("test-ns")}, + {Name: "cache", Value: *pipelinev1.NewStructuredValues("always")}, + }, + } + + _, err := resolver.Resolve(ctx, req) + if err == nil { + t.Error("Expected error when resolver is disabled") + } + if !strings.Contains(err.Error(), "enable-cluster-resolver feature flag not true") { + t.Errorf("Expected disabled error, got: %v", err) + } +} + +func TestResolveWithNoParams(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + resolver := &cluster.Resolver{} + + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + req := &v1beta1.ResolutionRequestSpec{ + Params: []pipelinev1.Param{}, + } + + _, err := resolver.Resolve(ctx, req) + if err == nil 
{ + t.Error("Expected error when no params provided") + } + if !strings.Contains(err.Error(), "missing required cluster resolver params") { + t.Errorf("Expected validation error about missing params, got: %v", err) + } +} + +func TestResolveWithInvalidParams(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + resolver := &cluster.Resolver{} + + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + req := &v1beta1.ResolutionRequestSpec{ + Params: []pipelinev1.Param{ + {Name: "invalid", Value: *pipelinev1.NewStructuredValues("value")}, + }, + } + + _, err := resolver.Resolve(ctx, req) + if err == nil { + t.Error("Expected error with invalid params") + } + if !strings.Contains(err.Error(), "missing required cluster resolver params") { + t.Errorf("Expected validation error, got: %v", err) + } +} + +// TestResolveWithCacheHit verifies that when a resource is already in the cache, +// the resolver returns the cached resource without calling the underlying resolver +func TestResolveWithCacheHit(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + resolver := &cluster.Resolver{} + + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + // prepopulate the cache with a mock resource + // This simulates a previous resolution that was cached + mockResource := &clusterresolution.ResolvedClusterResource{ + Content: []byte("cached content"), + Spec: []byte("cached spec"), + Name: "cached-task", + Namespace: "cached-ns", + Identifier: "cached-identifier", + Checksum: []byte{1, 2, 3, 4}, + } + + params := []pipelinev1.Param{ + {Name: "kind", Value: *pipelinev1.NewStructuredValues("task")}, + {Name: "name", Value: *pipelinev1.NewStructuredValues("test-task")}, + {Name: "namespace", Value: *pipelinev1.NewStructuredValues("test-ns")}, + {Name: "cache", Value: *pipelinev1.NewStructuredValues("always")}, + } + + // add the mock resource to the cache + 
cache.Get(ctx).Add(cluster.LabelValueClusterResolverType, params, mockResource) + + // create request with same parameters + req := &v1beta1.ResolutionRequestSpec{Params: params} + + // resolve should hit the cache and return the cached resource + result, err := resolver.Resolve(ctx, req) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + // verify the result is not nil + if result == nil { + t.Fatal("Expected result but got nil") + } + + // verify cache annotations are present (indicates resource came from cache) + annotations := result.Annotations() + if annotations["resolution.tekton.dev/cached"] != "true" { + t.Error("Expected cached annotation to be true") + } + if annotations["resolution.tekton.dev/cache-resolver-type"] != "cluster" { + t.Error("Expected resolver type to be cluster") + } + if annotations["resolution.tekton.dev/cache-operation"] != "retrieve" { + t.Errorf("Expected cache operation to be 'retrieve', got: %v", annotations["resolution.tekton.dev/cache-operation"]) + } + + // Verify the returned data matches the cached resource + if string(result.Data()) != string(mockResource.Data()) { + t.Errorf("Expected data %q, got %q", string(mockResource.Data()), string(result.Data())) + } +} diff --git a/pkg/remoteresolution/resolver/framework/cache/annotated_resource.go b/pkg/remoteresolution/resolver/framework/cache/annotated_resource.go new file mode 100644 index 00000000000..6a7b5b25329 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/annotated_resource.go @@ -0,0 +1,88 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" +) + +const ( + // cacheAnnotationKey is the annotation key indicating if a resource was cached + cacheAnnotationKey = "resolution.tekton.dev/cached" + // cacheTimestampKey is the annotation key for when the resource was cached + cacheTimestampKey = "resolution.tekton.dev/cache-timestamp" + // cacheResolverTypeKey is the annotation key for the resolver type that cached it + cacheResolverTypeKey = "resolution.tekton.dev/cache-resolver-type" + // cacheOperationKey is the annotation key for the cache operation type + cacheOperationKey = "resolution.tekton.dev/cache-operation" + // cacheValueTrue is the value used for cache annotations + cacheValueTrue = "true" + // cacheOperationStore is the value for cache store operations + cacheOperationStore = "store" + // cacheOperationRetrieve is the value for cache retrieve operations + cacheOperationRetrieve = "retrieve" +) + +// annotatedResource wraps a ResolvedResource with cache annotations +type annotatedResource struct { + resource resolutionframework.ResolvedResource + annotations map[string]string +} + +// newAnnotatedResource creates a new annotatedResource with cache annotations +func newAnnotatedResource(resource resolutionframework.ResolvedResource, resolverType, operation string, clock Clock) *annotatedResource { + // Create a new map to avoid concurrent map writes when the same resource + // is being annotated from multiple goroutines 
+ existingAnnotations := resource.Annotations() + annotations := make(map[string]string) + + // Copy existing annotations to new map + if existingAnnotations != nil { + for k, v := range existingAnnotations { + annotations[k] = v + } + } + + // Add cache annotations + annotations[cacheAnnotationKey] = cacheValueTrue + annotations[cacheTimestampKey] = clock.Now().Format(time.RFC3339) + annotations[cacheResolverTypeKey] = resolverType + annotations[cacheOperationKey] = operation + + return &annotatedResource{ + resource: resource, + annotations: annotations, + } +} + +// Data returns the bytes of the resource +func (a *annotatedResource) Data() []byte { + return a.resource.Data() +} + +// Annotations returns the annotations with cache metadata +func (a *annotatedResource) Annotations() map[string]string { + return a.annotations +} + +// RefSource returns the source reference of the remote data +func (a *annotatedResource) RefSource() *v1.RefSource { + return a.resource.RefSource() +} diff --git a/pkg/remoteresolution/resolver/framework/cache/annotated_resource_test.go b/pkg/remoteresolution/resolver/framework/cache/annotated_resource_test.go new file mode 100644 index 00000000000..749ea305e2b --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/annotated_resource_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "testing" + "time" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" +) + +// mockResolvedResource implements resolutionframework.ResolvedResource for testing +type mockResolvedResource struct { + data []byte + annotations map[string]string + refSource *v1.RefSource +} + +func (m *mockResolvedResource) Data() []byte { + return m.data +} + +func (m *mockResolvedResource) Annotations() map[string]string { + return m.annotations +} + +func (m *mockResolvedResource) RefSource() *v1.RefSource { + return m.refSource +} + +func TestNewAnnotatedResource(t *testing.T) { + // Create mock resource with existing annotations to test preservation + mockAnnotations := map[string]string{ + "existing-key": "existing-value", + } + + mockResource := &mockResolvedResource{ + data: []byte("test data"), + annotations: mockAnnotations, + refSource: &v1.RefSource{ + URI: "test-uri", + }, + } + + // Create fake clock with fixed time for deterministic testing + fixedTime := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC) + fc := &fakeClock{now: fixedTime} + + resolverType := "bundles" + + // Create annotated resource + annotated := newAnnotatedResource(mockResource, resolverType, cacheOperationStore, fc) + + // Verify data is preserved + if string(annotated.Data()) != "test data" { + t.Errorf("Expected data 'test data', got '%s'", string(annotated.Data())) + } + + // Verify annotations are added + annotations := annotated.Annotations() + if annotations[cacheAnnotationKey] != "true" { + t.Errorf("Expected cache annotation to be 'true', got '%s'", annotations[cacheAnnotationKey]) + } + + if annotations[cacheResolverTypeKey] != resolverType { + t.Errorf("Expected resolver type '%s', got '%s'", resolverType, annotations[cacheResolverTypeKey]) + } + + // Verify timestamp is as expected + expectedTimestamp := fixedTime.Format(time.RFC3339) + timestamp := annotations[cacheTimestampKey] + if timestamp != expectedTimestamp { + t.Errorf("Expected cache 
timestamp to be %s, got %s", expectedTimestamp, timestamp) + } + + // Verify timestamp is valid RFC3339 format + _, err := time.Parse(time.RFC3339, timestamp) + if err != nil { + t.Errorf("Expected valid RFC3339 timestamp, got error: %v", err) + } + + // Verify cache operation is set + if annotations[cacheOperationKey] != cacheOperationStore { + t.Errorf("Expected cache operation '%s', got '%s'", cacheOperationStore, annotations[cacheOperationKey]) + } + + // Verify existing annotations are preserved + if annotations["existing-key"] != "existing-value" { + t.Errorf("Expected existing annotation to be preserved, got '%s'", annotations["existing-key"]) + } + + // Verify RefSource is preserved + if annotated.RefSource().URI != "test-uri" { + t.Errorf("Expected RefSource URI 'test-uri', got '%s'", annotated.RefSource().URI) + } +} diff --git a/pkg/remoteresolution/resolver/framework/cache/cache.go b/pkg/remoteresolution/resolver/framework/cache/cache.go new file mode 100644 index 00000000000..09e49bc133a --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/cache.go @@ -0,0 +1,189 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "sort" + "time" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" + "go.uber.org/zap" + utilcache "k8s.io/apimachinery/pkg/util/cache" +) + +var _ resolutionframework.ConfigWatcher = (*resolverCache)(nil) + +// resolverCache is a wrapper around utilcache.LRUExpireCache that provides +// type-safe methods for caching resolver results. +type resolverCache struct { + cache *utilcache.LRUExpireCache + logger *zap.SugaredLogger + ttl time.Duration + maxSize int + clock Clock +} + +func newResolverCache(maxSize int, ttl time.Duration) *resolverCache { + return &resolverCache{ + cache: utilcache.NewLRUExpireCache(maxSize), + ttl: ttl, + maxSize: maxSize, + clock: realClock{}, + } +} + +// GetConfigName returns the name of the cache's configmap. +func (c *resolverCache) GetConfigName(_ context.Context) string { + return getCacheConfigName() +} + +// withLogger returns a new ResolverCache instance with the provided logger. +// This prevents state leak by not storing logger in the global singleton. +func (c *resolverCache) withLogger(logger *zap.SugaredLogger) *resolverCache { + return &resolverCache{logger: logger, cache: c.cache, ttl: c.ttl, maxSize: c.maxSize, clock: c.clock} +} + +// TTL returns the time-to-live duration for cache entries. +func (c *resolverCache) TTL() time.Duration { + return c.ttl +} + +// MaxSize returns the maximum number of entries the cache can hold. +func (c *resolverCache) MaxSize() int { + return c.maxSize +} + +// Get retrieves a cached resource by resolver type and parameters, returning +// the resource and whether it was found. 
+func (c *resolverCache) Get(resolverType string, params []pipelinev1.Param) (resolutionframework.ResolvedResource, bool) { + key := generateCacheKey(resolverType, params) + value, found := c.cache.Get(key) + if !found { + c.infow("Cache miss", "key", key) + return nil, found + } + + resource, ok := value.(resolutionframework.ResolvedResource) + if !ok { + c.infow("Failed casting cached resource", "key", key) + return nil, false + } + + c.infow("Cache hit", "key", key) + return newAnnotatedResource(resource, resolverType, cacheOperationRetrieve, c.clock), true +} + +func (c *resolverCache) infow(msg string, keysAndValues ...any) { + if c.logger != nil { + c.logger.Infow(msg, keysAndValues...) + } +} + +// Add stores a resource in the cache with the configured TTL and returns an +// annotated version of the resource. +func (c *resolverCache) Add( + resolverType string, + params []pipelinev1.Param, + resource resolutionframework.ResolvedResource, +) resolutionframework.ResolvedResource { + key := generateCacheKey(resolverType, params) + c.infow("Adding to cache", "key", key, "expiration", c.ttl) + + annotatedResource := newAnnotatedResource(resource, resolverType, cacheOperationStore, c.clock) + + c.cache.Add(key, annotatedResource, c.ttl) + + return annotatedResource +} + +// Remove deletes a cached resource identified by resolver type and parameters. +func (c *resolverCache) Remove(resolverType string, params []pipelinev1.Param) { + key := generateCacheKey(resolverType, params) + c.infow("Removing from cache", "key", key) + + c.cache.Remove(key) +} + +// Clear removes all entries from the cache. 
+func (c *resolverCache) Clear() { + c.infow("Clearing all cache entries") + // predicate that returns true clears all entries + c.cache.RemoveAll(func(_ any) bool { return true }) +} + +func generateCacheKey(resolverType string, params []pipelinev1.Param) string { + // Create a deterministic string representation of the parameters + paramStr := resolverType + ":" + + // Filter out the 'cache' parameter and sort remaining params by name for determinism + filteredParams := make([]pipelinev1.Param, 0, len(params)) + for _, p := range params { + if p.Name != CacheParam { + filteredParams = append(filteredParams, p) + } + } + + // Sort params by name to ensure deterministic ordering + sort.Slice(filteredParams, func(i, j int) bool { + return filteredParams[i].Name < filteredParams[j].Name + }) + + for _, p := range filteredParams { + paramStr += p.Name + "=" + + switch p.Value.Type { + case pipelinev1.ParamTypeString: + paramStr += p.Value.StringVal + case pipelinev1.ParamTypeArray: + // Sort array values for determinism + arrayVals := make([]string, len(p.Value.ArrayVal)) + copy(arrayVals, p.Value.ArrayVal) + sort.Strings(arrayVals) + for i, val := range arrayVals { + if i > 0 { + paramStr += "," + } + paramStr += val + } + case pipelinev1.ParamTypeObject: + // Sort object keys for determinism + keys := make([]string, 0, len(p.Value.ObjectVal)) + for k := range p.Value.ObjectVal { + keys = append(keys, k) + } + sort.Strings(keys) + for i, key := range keys { + if i > 0 { + paramStr += "," + } + paramStr += key + ":" + p.Value.ObjectVal[key] + } + default: + // For unknown types, use StringVal as fallback + paramStr += p.Value.StringVal + } + paramStr += ";" + } + + // Generate a SHA-256 hash of the parameter string + hash := sha256.Sum256([]byte(paramStr)) + return hex.EncodeToString(hash[:]) +} diff --git a/pkg/remoteresolution/resolver/framework/cache/cache_test.go b/pkg/remoteresolution/resolver/framework/cache/cache_test.go new file mode 100644 index 
00000000000..90fefce9953 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/cache_test.go @@ -0,0 +1,755 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "testing" + "time" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + _ "knative.dev/pkg/system/testing" // Setup system.Namespace() +) + +func TestGenerateCacheKey(t *testing.T) { + tests := []struct { + name string + resolverType string + params []pipelinev1.Param + expectedKey string + }{ + { + name: "empty params", + resolverType: "http", + params: []pipelinev1.Param{}, + expectedKey: "1c31dda07cb1e09e89bd660a8d114936b44f728b73a3bc52c69a409ee1d44e67", + }, + { + name: "single param", + resolverType: "http", + params: []pipelinev1.Param{ + { + Name: "url", + Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: "https://example.com", + }, + }, + }, + expectedKey: "63f68e3e567eafd7efb4149b3389b3261784c8ac5847b62e90b7ae8d23f6e889", + }, + { + name: "multiple params", + resolverType: "git", + params: []pipelinev1.Param{ + { + Name: "url", + Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: "https://github.com/tektoncd/pipeline", + }, + }, + { + Name: "revision", + Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: "main", + }, + }, + }, + expectedKey: "fbe74989962e04dbb512a986864acff592dd02e84ab20f7544fa6b473648f28c", + }, + } + + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + actualKey := generateCacheKey(tt.resolverType, tt.params) + if tt.expectedKey != actualKey { + t.Errorf("want %s, got %s", tt.expectedKey, actualKey) + } + }) + } +} + +func TestGenerateCacheKey_IndependentOfCacheParam(t *testing.T) { + tests := []struct { + name string + resolverType string + params []pipelinev1.Param + expectedSame bool + description string + }{ + { + name: "same params without cache param", + resolverType: "git", + params: []pipelinev1.Param{ + {Name: "url", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "https://github.com/tektoncd/pipeline"}}, + {Name: "revision", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "main"}}, + }, + expectedSame: true, + description: "Params without cache param should generate same key", + }, + { + name: "same params with different cache values", + resolverType: "git", + params: []pipelinev1.Param{ + {Name: "url", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "https://github.com/tektoncd/pipeline"}}, + {Name: "revision", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "main"}}, + {Name: "cache", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "true"}}, + }, + expectedSame: true, + description: "Params with cache=true should generate same key as without cache param", + }, + { + name: "same params with cache=false", + resolverType: "git", + params: []pipelinev1.Param{ + {Name: "url", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "https://github.com/tektoncd/pipeline"}}, + {Name: "revision", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "main"}}, + {Name: "cache", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "false"}}, + }, + expectedSame: true, + description: "Params with cache=false should generate same key as without cache param", + }, + { + 
name: "different params should generate different keys", + resolverType: "git", + params: []pipelinev1.Param{ + {Name: "url", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "https://github.com/tektoncd/pipeline"}}, + {Name: "revision", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "v0.50.0"}}, + }, + expectedSame: false, + description: "Different revision should generate different key", + }, + { + name: "array params", + resolverType: "bundle", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "gcr.io/tekton-releases/catalog/upstream/git-clone"}}, + {Name: "name", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "git-clone"}}, + {Name: "cache", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "true"}}, + }, + expectedSame: true, + description: "Array params with cache should generate same key as without cache", + }, + { + name: "object params", + resolverType: "hub", + params: []pipelinev1.Param{ + {Name: "name", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "git-clone"}}, + {Name: "version", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "0.8"}}, + {Name: "cache", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "false"}}, + }, + expectedSame: true, + description: "Object params with cache should generate same key as without cache", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectedSame { + // Generate key with cache param + keyWithCache := generateCacheKey(tt.resolverType, tt.params) + + // Generate key without cache param + paramsWithoutCache := make([]pipelinev1.Param, 0, len(tt.params)) + for _, p := range tt.params { + if p.Name != "cache" { + paramsWithoutCache = append(paramsWithoutCache, p) + } + } + keyWithoutCache := generateCacheKey(tt.resolverType, 
paramsWithoutCache) + + if keyWithCache != keyWithoutCache { + t.Errorf("Expected same keys, but got different:\nWith cache: %s\nWithout cache: %s\nDescription: %s", + keyWithCache, keyWithoutCache, tt.description) + } + } else { + // For different params test, create a second set with different values + params2 := make([]pipelinev1.Param, len(tt.params)) + copy(params2, tt.params) + // Change the revision value to make it different + for i := range params2 { + if params2[i].Name == "revision" { + params2[i].Value.StringVal = "main" + break + } + } + + key1 := generateCacheKey(tt.resolverType, tt.params) + key2 := generateCacheKey(tt.resolverType, params2) + if key1 == key2 { + t.Errorf("Expected different keys, but got same: %s\nDescription: %s", + key1, tt.description) + } + } + }) + } +} + +func TestGenerateCacheKey_Deterministic(t *testing.T) { + resolverType := "git" + params := []pipelinev1.Param{ + {Name: "url", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "https://github.com/tektoncd/pipeline"}}, + {Name: "revision", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "main"}}, + {Name: "cache", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "true"}}, + } + + // Generate the same key multiple times + key1 := generateCacheKey(resolverType, params) + key2 := generateCacheKey(resolverType, params) + + if key1 != key2 { + t.Errorf("Cache key generation is not deterministic. 
Got different keys: %s vs %s", key1, key2) + } +} + +func TestGenerateCacheKey_AllParamTypes(t *testing.T) { + resolverType := "test" + params := []pipelinev1.Param{ + {Name: "string-param", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "string-value"}}, + {Name: "array-param", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeArray, ArrayVal: []string{"item1", "item2"}}}, + {Name: "object-param", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeObject, ObjectVal: map[string]string{"key1": "value1", "key2": "value2"}}}, + {Name: "cache", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "true"}}, + } + + // Generate key with cache param + keyWithCache := generateCacheKey(resolverType, params) + + // Generate key without cache param + paramsWithoutCache := make([]pipelinev1.Param, 0, len(params)) + for _, p := range params { + if p.Name != "cache" { + paramsWithoutCache = append(paramsWithoutCache, p) + } + } + keyWithoutCache := generateCacheKey(resolverType, paramsWithoutCache) + if keyWithCache != keyWithoutCache { + t.Errorf("Expected same keys for all param types, but got different:\nWith cache: %s\nWithout cache: %s", + keyWithCache, keyWithoutCache) + } +} + +func TestCacheTTLExpiration(t *testing.T) { + // Use short TTL for fast test execution (10ms) + ttl := 10 * time.Millisecond + cache := newResolverCache(100, ttl) + + resolverType := "bundle" + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo@sha256:abcdef"}}, + } + + // Create a mock resource using the existing mockResolvedResource type + mockResource := &mockResolvedResource{ + data: []byte("test data"), + } + + // Add to cache + cache.Add(resolverType, params, mockResource) + + // Verify it's immediately retrievable + if cached, ok := cache.Get(resolverType, params); !ok { + t.Error("Expected cache hit immediately after adding, but got cache miss") 
+ } else if cached == nil { + t.Error("Expected non-nil cached resource immediately after adding") + } + + // Wait for TTL to expire with minimal delay + time.Sleep(ttl + 5*time.Millisecond) + + // Verify entry is no longer in cache after TTL expiration + if cached, ok := cache.Get(resolverType, params); ok { + t.Errorf("Expected cache miss after TTL expiration, but got cache hit with resource: %v", cached) + } +} + +func TestCacheLRUEviction(t *testing.T) { + // Create cache with small size (3 entries) for testing eviction + maxSize := 3 + cache := newResolverCache(maxSize, 1*time.Hour) // Long TTL so only LRU eviction triggers + + resolverType := "bundle" + + // Create 3 different entries + entries := []struct { + name string + params []pipelinev1.Param + }{ + { + name: "entry1", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo1@sha256:111111"}}, + }, + }, + { + name: "entry2", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo2@sha256:222222"}}, + }, + }, + { + name: "entry3", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo3@sha256:333333"}}, + }, + }, + } + + // Add all 3 entries to fill the cache + for _, entry := range entries { + mockResource := &mockResolvedResource{ + data: []byte(entry.name), + } + cache.Add(resolverType, entry.params, mockResource) + } + + // Verify all 3 entries are in cache + for _, entry := range entries { + if _, ok := cache.Get(resolverType, entry.params); !ok { + t.Errorf("Expected cache hit for %s after adding, but got cache miss", entry.name) + } + } + + // Add a 4th entry which should evict the least recently used (entry1) + entry4Params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: 
"registry.io/repo4@sha256:444444"}}, + } + mockResource4 := &mockResolvedResource{ + data: []byte("entry4"), + } + cache.Add(resolverType, entry4Params, mockResource4) + + // Verify entry1 (LRU) was evicted + if _, ok := cache.Get(resolverType, entries[0].params); ok { + t.Error("Expected entry1 to be evicted (LRU), but it was still in cache") + } + + // Verify entry2 and entry3 are still in cache + if _, ok := cache.Get(resolverType, entries[1].params); !ok { + t.Error("Expected entry2 to still be in cache, but got cache miss") + } + if _, ok := cache.Get(resolverType, entries[2].params); !ok { + t.Error("Expected entry3 to still be in cache, but got cache miss") + } + + // Verify entry4 is in cache + if _, ok := cache.Get(resolverType, entry4Params); !ok { + t.Error("Expected entry4 to be in cache after adding, but got cache miss") + } +} + +func TestCacheLRUEvictionWithAccess(t *testing.T) { + // Create cache with small size (3 entries) for testing LRU access pattern + maxSize := 3 + cache := newResolverCache(maxSize, 1*time.Hour) + + resolverType := "bundle" + + // Create 3 different entries + entries := []struct { + name string + params []pipelinev1.Param + }{ + { + name: "entry1", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo1@sha256:aaa"}}, + }, + }, + { + name: "entry2", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo2@sha256:bbb"}}, + }, + }, + { + name: "entry3", + params: []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo3@sha256:ccc"}}, + }, + }, + } + + // Add all 3 entries + for _, entry := range entries { + mockResource := &mockResolvedResource{ + data: []byte(entry.name), + } + cache.Add(resolverType, entry.params, mockResource) + } + + // Access entry1 to make it recently used (entry2 
becomes LRU) + if _, ok := cache.Get(resolverType, entries[0].params); !ok { + t.Error("Expected cache hit for entry1") + } + + // Add a 4th entry which should evict entry2 (now LRU) instead of entry1 + entry4Params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "registry.io/repo4@sha256:ddd"}}, + } + mockResource4 := &mockResolvedResource{ + data: []byte("entry4"), + } + cache.Add(resolverType, entry4Params, mockResource4) + + // Verify entry2 (LRU after entry1 was accessed) was evicted + if _, ok := cache.Get(resolverType, entries[1].params); ok { + t.Error("Expected entry2 to be evicted (LRU), but it was still in cache") + } + + // Verify entry1 (accessed recently) is still in cache + if _, ok := cache.Get(resolverType, entries[0].params); !ok { + t.Error("Expected entry1 to still be in cache after being accessed, but got cache miss") + } + + // Verify entry3 is still in cache + if _, ok := cache.Get(resolverType, entries[2].params); !ok { + t.Error("Expected entry3 to still be in cache, but got cache miss") + } + + // Verify entry4 is in cache + if _, ok := cache.Get(resolverType, entry4Params); !ok { + t.Error("Expected entry4 to be in cache, but got cache miss") + } +} + +func TestCacheConcurrentReads(t *testing.T) { + // Test concurrent reads with high load + cache := newResolverCache(100, 1*time.Hour) + resolverType := "bundle" + + // Pre-populate cache with entries + numEntries := 50 + entries := make([][]pipelinev1.Param, numEntries) + for i := range numEntries { + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: fmt.Sprintf("registry.io/repo%d@sha256:%064d", i, i), + }}, + } + entries[i] = params + mockResource := &mockResolvedResource{ + data: []byte(fmt.Sprintf("data-%d", i)), + } + cache.Add(resolverType, params, mockResource) + } + + // Launch 1000 concurrent readers + numReaders := 1000 + done := 
make(chan bool, numReaders) + + for i := range numReaders { + go func(readerID int) { + defer func() { done <- true }() + + // Each reader performs 100 reads + for j := range 100 { + entryIdx := (readerID + j) % numEntries + if _, ok := cache.Get(resolverType, entries[entryIdx]); !ok { + t.Errorf("Reader %d: Expected cache hit for entry %d, got miss", readerID, entryIdx) + } + } + }(i) + } + + // Wait for all readers to complete + for range numReaders { + <-done + } +} + +func TestCacheConcurrentWrites(t *testing.T) { + // Test concurrent writes with high load + cache := newResolverCache(1000, 1*time.Hour) + resolverType := "bundle" + + // Launch 500 concurrent writers + numWriters := 500 + entriesPerWriter := 10 + done := make(chan bool, numWriters) + + // Track all entries written for later verification + type entryInfo struct { + params []pipelinev1.Param + expectedData string + } + allEntries := make([]entryInfo, numWriters*entriesPerWriter) + + for i := range numWriters { + go func(writerID int) { + defer func() { done <- true }() + + // Each writer adds 10 unique entries + for j := range entriesPerWriter { + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: fmt.Sprintf("registry.io/writer%d-entry%d@sha256:%064d", writerID, j, writerID*100+j), + }}, + } + expectedData := fmt.Sprintf("writer-%d-data-%d", writerID, j) + mockResource := &mockResolvedResource{ + data: []byte(expectedData), + } + cache.Add(resolverType, params, mockResource) + + // Record this entry for verification + allEntries[writerID*entriesPerWriter+j] = entryInfo{ + params: params, + expectedData: expectedData, + } + } + }(i) + } + + // Wait for all writers to complete + for range numWriters { + <-done + } + + // Verify that concurrent writes are retrievable and have correct data + // With 5000 entries (500 writers * 10 entries each) and cache size of 1000, + // we expect many entries to be evicted due to LRU. 
We verify that: + // 1. Entries that ARE in cache have the correct data + // 2. We get a reasonable hit rate + cachedCount := 0 + wrongDataCount := 0 + + for _, entry := range allEntries { + cached, ok := cache.Get(resolverType, entry.params) + if ok { + cachedCount++ + // Verify the data is correct + if string(cached.Data()) != entry.expectedData { + wrongDataCount++ + t.Errorf("Expected data '%s', got '%s'", entry.expectedData, string(cached.Data())) + } + } + } + + // We should have no entries with wrong data + if wrongDataCount > 0 { + t.Errorf("Found %d entries with wrong data", wrongDataCount) + } + + // We should have some reasonable number of entries cached + // With 5000 entries and cache size 1000, we expect close to 1000 entries to be cached + // Using a lower bound of 500 to account for concurrent evictions and timing + if cachedCount < 500 { + t.Errorf("Expected at least 500 entries to be cached, but only found %d out of %d total", cachedCount, len(allEntries)) + } + + t.Logf("Concurrent write test passed: %d entries cached out of %d written", cachedCount, len(allEntries)) +} + +func TestCacheConcurrentReadWrite(t *testing.T) { + // Test concurrent reads and writes together + cache := newResolverCache(500, 1*time.Hour) + resolverType := "bundle" + + // Pre-populate with some entries + numInitialEntries := 100 + initialEntries := make([][]pipelinev1.Param, numInitialEntries) + for i := range numInitialEntries { + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: fmt.Sprintf("registry.io/initial%d@sha256:%064d", i, i), + }}, + } + initialEntries[i] = params + mockResource := &mockResolvedResource{ + data: []byte(fmt.Sprintf("initial-data-%d", i)), + } + cache.Add(resolverType, params, mockResource) + } + + // Launch 300 readers and 300 writers concurrently + numReaders := 300 + numWriters := 300 + entriesPerWriter := 5 + totalGoroutines := numReaders + numWriters + done := 
make(chan bool, totalGoroutines) + + // Track entries written by concurrent writers for later verification + type entryInfo struct { + params []pipelinev1.Param + expectedData string + } + writerEntries := make([]entryInfo, numWriters*entriesPerWriter) + + // Start readers + for i := range numReaders { + go func(readerID int) { + defer func() { done <- true }() + + for j := range 50 { + entryIdx := (readerID + j) % numInitialEntries + cache.Get(resolverType, initialEntries[entryIdx]) + } + }(i) + } + + // Start writers + for i := range numWriters { + go func(writerID int) { + defer func() { done <- true }() + + for j := range entriesPerWriter { + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: fmt.Sprintf("registry.io/concurrent-writer%d-entry%d@sha256:%064d", writerID, j, writerID*10+j), + }}, + } + expectedData := fmt.Sprintf("concurrent-writer-%d-data-%d", writerID, j) + mockResource := &mockResolvedResource{ + data: []byte(expectedData), + } + cache.Add(resolverType, params, mockResource) + + // Record this entry for verification + writerEntries[writerID*entriesPerWriter+j] = entryInfo{ + params: params, + expectedData: expectedData, + } + } + }(i) + } + + // Wait for all goroutines to complete + for range totalGoroutines { + <-done + } + + // Verify that concurrent writes are retrievable and have correct data + // With 1500 new entries (300 writers * 5 entries each) plus 100 initial entries, + // and cache size of 500, we expect some entries to be evicted. We verify that: + // 1. Entries that ARE in cache have the correct data + // 2. 
We get a reasonable hit rate for the concurrent writes + cachedCount := 0 + wrongDataCount := 0 + + for _, entry := range writerEntries { + cached, ok := cache.Get(resolverType, entry.params) + if ok { + cachedCount++ + // Verify the data is correct + if string(cached.Data()) != entry.expectedData { + wrongDataCount++ + t.Errorf("Expected data '%s', got '%s'", entry.expectedData, string(cached.Data())) + } + } + } + + // We should have no entries with wrong data + if wrongDataCount > 0 { + t.Errorf("Found %d entries with wrong data", wrongDataCount) + } + + // We should have some reasonable number of entries cached + // With 1500 concurrent write entries and cache size 500, accounting for initial entries + // and concurrent evictions, we expect at least 200 of the writer entries to be cached + if cachedCount < 200 { + t.Errorf("Expected at least 200 writer entries to be cached, but only found %d out of %d total", cachedCount, len(writerEntries)) + } + + t.Logf("Concurrent read/write test passed: %d writer entries cached out of %d written", cachedCount, len(writerEntries)) +} + +func TestCacheConcurrentEviction(t *testing.T) { + // Test that LRU eviction works correctly under concurrent load + maxSize := 50 + cache := newResolverCache(maxSize, 1*time.Hour) + resolverType := "bundle" + + // Launch 200 concurrent writers adding entries to force evictions + numWriters := 200 + entriesPerWriter := 10 + done := make(chan bool, numWriters) + + for i := range numWriters { + go func(writerID int) { + defer func() { done <- true }() + + for j := range entriesPerWriter { + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: fmt.Sprintf("registry.io/eviction-writer%d-entry%d@sha256:%064d", writerID, j, writerID*100+j), + }}, + } + mockResource := &mockResolvedResource{ + data: []byte(fmt.Sprintf("eviction-data-%d-%d", writerID, j)), + } + cache.Add(resolverType, params, mockResource) + + // Small 
random read to simulate real access patterns + if j%3 == 0 { + cache.Get(resolverType, params) + } + } + }(i) + } + + // Wait for all writers to complete + for range numWriters { + <-done + } + + // Cache should not panic or deadlock under concurrent eviction pressure + // Verify that expected entries were evicted and cache size is maintained + + // The cache should have at most maxSize entries + // We'll verify this by checking that older entries were evicted + // Since we had 200 writers * 10 entries = 2000 total entries written + // and cache size is 50, we expect most early entries to be evicted + + // Check that early entries were evicted (writers 0-49 should be mostly evicted) + evictedCount := 0 + totalChecked := 0 + for writerID := range 50 { // Check first 50 writers + for j := range entriesPerWriter { + params := []pipelinev1.Param{ + {Name: "bundle", Value: pipelinev1.ParamValue{ + Type: pipelinev1.ParamTypeString, + StringVal: fmt.Sprintf("registry.io/eviction-writer%d-entry%d@sha256:%064d", writerID, j, writerID*100+j), + }}, + } + totalChecked++ + if _, ok := cache.Get(resolverType, params); !ok { + evictedCount++ + } + } + } + + // We expect most early entries to be evicted (at least 80% of early entries checked) + // Using a lower threshold due to concurrent access patterns and timing variations + expectedEvicted := int(float64(totalChecked) * 0.8) + if evictedCount < expectedEvicted { + t.Errorf("Expected at least %d early entries to be evicted, but only %d were evicted out of %d checked", expectedEvicted, evictedCount, totalChecked) + } + + // The main goal of this test is to ensure no panics or deadlocks under concurrent eviction, + // and that LRU eviction actually occurs (verified above) + // With cache size of 50 and 2000 total entries with random access patterns, we've verified: + // 1. No crashes or deadlocks + // 2. 
Old entries are evicted + t.Logf("Eviction test passed: %d early entries evicted out of %d checked", evictedCount, totalChecked) +} diff --git a/pkg/remoteresolution/resolver/framework/cache/clock.go b/pkg/remoteresolution/resolver/framework/cache/clock.go new file mode 100644 index 00000000000..907f3bd9d59 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/clock.go @@ -0,0 +1,49 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import "time" + +// Clock is an interface for getting the current time. +// This allows for testing without relying on actual time for timestamp generation. +// Note: The underlying k8s LRUExpireCache uses real time for TTL expiration checks, +// so we cannot use a fake clock to control cache expiration in tests. +type Clock interface { + Now() time.Time +} + +// realClock implements Clock using the actual system time. +type realClock struct{} + +func (realClock) Now() time.Time { + return time.Now() +} + +// fakeClock implements Clock with a controllable time for testing. +type fakeClock struct { + now time.Time +} + +// Now returns the current time of the fake clock. +func (f *fakeClock) Now() time.Time { + return f.now +} + +// Advance moves the fake clock forward by the given duration. 
+func (f *fakeClock) Advance(d time.Duration) { + f.now = f.now.Add(d) +} diff --git a/pkg/remoteresolution/resolver/framework/cache/configstore.go b/pkg/remoteresolution/resolver/framework/cache/configstore.go new file mode 100644 index 00000000000..d3c75475ab9 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/configstore.go @@ -0,0 +1,146 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "os" + "strconv" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/configmap" +) + +const ( + // resolverCacheConfigMapNameEnv env var overwrites the cache ConfigMap name + // defaults to "resolver-cache-config" + resolverCacheConfigMapNameEnv = "RESOLVER_CACHE_CONFIG_MAP_NAME" + // defaultConfigMapName is the default name of the ConfigMap that configures resolver cache settings + // the ConfigMap contains max-size and ttl configuration for the shared resolver cache + defaultConfigMapName = "resolver-cache-config" + maxSizeConfigMapKey = "max-size" + ttlConfigMapKey = "ttl" + defaultCacheSize = 1000 + defaultExpiration = 5 * time.Minute +) + +var ( + cacheMu sync.Mutex + startWatchingOnce sync.Once +) + +type cacheConfigKey struct{} + +// Config holds the configuration for the resolver cache +type Config struct { + MaxSize int + TTL time.Duration +} + +type CacheConfigStore struct { + untyped *configmap.UntypedStore + cacheConfigName string +} + +func 
NewCacheConfigStore(cacheConfigName string, logger configmap.Logger) *CacheConfigStore { + return &CacheConfigStore{ + cacheConfigName: cacheConfigName, + untyped: configmap.NewUntypedStore( + defaultConfigMapName, + logger, + configmap.Constructors{ + getCacheConfigName(): NewConfigFromConfigMap, + }, + onCacheConfigChanged, + ), + } +} + +func (store *CacheConfigStore) WatchConfigs(w configmap.Watcher) { + startWatchingOnce.Do(func() { + store.untyped.WatchConfigs(w) + }) +} + +func (store *CacheConfigStore) GetResolverConfig() *Config { + untypedConf := store.untyped.UntypedLoad(store.cacheConfigName) + if cacheConf, ok := untypedConf.(*Config); ok { + return cacheConf + } + + return &Config{ + MaxSize: defaultCacheSize, + TTL: defaultExpiration, + } +} + +// ToContext returns a new context with the cache's configuration +// data stored in it. +func (store *CacheConfigStore) ToContext(ctx context.Context) context.Context { + return context.WithValue(ctx, cacheConfigKey{}, store.GetResolverConfig()) +} + +// getCacheConfigName returns the name of the cache configuration ConfigMap. +// This can be overridden via the cacheConfigEnv environment variable. 
+func getCacheConfigName() string { + if configMapName := os.Getenv(resolverCacheConfigMapNameEnv); configMapName != "" { + return configMapName + } + + return defaultConfigMapName +} + +// NewConfigFromConfigMap creates a Config from a ConfigMap +func NewConfigFromConfigMap(cm *corev1.ConfigMap) (*Config, error) { + config := &Config{ + MaxSize: defaultCacheSize, + TTL: defaultExpiration, + } + + if cm == nil { + return config, nil + } + + if maxSizeStr, ok := cm.Data[maxSizeConfigMapKey]; ok { + if parsed, err := strconv.Atoi(maxSizeStr); err == nil && parsed > 0 { + config.MaxSize = parsed + } + } + + if ttlStr, ok := cm.Data[ttlConfigMapKey]; ok { + if parsed, err := time.ParseDuration(ttlStr); err == nil && parsed > 0 { + config.TTL = parsed + } + } + + return config, nil +} + +func onCacheConfigChanged(_ string, value any) { + config, ok := value.(*Config) + if !ok { + return + } + + cacheMu.Lock() + defer cacheMu.Unlock() + + sharedCache = newResolverCache(config.MaxSize, config.TTL) +} diff --git a/pkg/remoteresolution/resolver/framework/cache/configstore_test.go b/pkg/remoteresolution/resolver/framework/cache/configstore_test.go new file mode 100644 index 00000000000..39b87159ba3 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/configstore_test.go @@ -0,0 +1,266 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logtesting "knative.dev/pkg/logging/testing" +) + +func TestParseCacheConfigMap(t *testing.T) { + tests := []struct { + name string + configMap *corev1.ConfigMap + expectedMaxSize int + expectedTTL time.Duration + }{ + { + name: "nil ConfigMap returns defaults", + configMap: nil, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + name: "empty ConfigMap returns defaults", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{}, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + name: "ConfigMap with custom max-size", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "max-size": "500", + }, + }, + expectedMaxSize: 500, + expectedTTL: defaultExpiration, + }, + { + name: "ConfigMap with custom ttl", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "ttl": "10m", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: 10 * time.Minute, + }, + { + name: "ConfigMap with both max-size and ttl", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "max-size": "2000", + "ttl": "1h", + }, + }, + expectedMaxSize: 2000, + expectedTTL: 1 * time.Hour, + }, + { + name: "ConfigMap with invalid max-size uses default", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "max-size": "invalid", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + 
name: "ConfigMap with negative max-size uses default", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "max-size": "-100", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + name: "ConfigMap with zero max-size uses default", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "max-size": "0", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + name: "ConfigMap with invalid ttl uses default", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "ttl": "invalid", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + name: "ConfigMap with negative ttl uses default", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "ttl": "-5m", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + { + name: "ConfigMap with zero ttl uses default", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-config", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "ttl": "0", + }, + }, + expectedMaxSize: defaultCacheSize, + expectedTTL: defaultExpiration, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config, err := NewConfigFromConfigMap(tt.configMap) + if err != nil { + t.Fatalf("NewConfigFromConfigMap() returned error: %v", err) + } + + if config.MaxSize != tt.expectedMaxSize { + t.Errorf("MaxSize = %d, want %d", config.MaxSize, tt.expectedMaxSize) + } + + if config.TTL != tt.expectedTTL { + t.Errorf("TTL = %v, want %v", config.TTL, tt.expectedTTL) + } + }) + } +} 
+ +func TestOnCacheConfigChanged(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + + // Ensure cache is initialized first + _ = Get(ctx) + + // Test that onCacheConfigChanged updates the shared cache with new config values + config := &Config{ + MaxSize: 500, + TTL: 10 * time.Minute, + } + + // Call onCacheConfigChanged to update the shared cache + onCacheConfigChanged("test-config", config) + + // Verify the shared cache was updated with the correct config values + cache := Get(ctx) + if cache == nil { + t.Fatal("Expected cache after config change but got nil") + } + + // Verify TTL was applied + if cache.TTL() != config.TTL { + t.Errorf("Expected TTL to be %v, got %v", config.TTL, cache.TTL()) + } + + // Verify MaxSize was applied + if cache.MaxSize() != config.MaxSize { + t.Errorf("Expected MaxSize to be %d, got %d", config.MaxSize, cache.MaxSize()) + } +} + +func TestOnCacheConfigChangedWithInvalidType(t *testing.T) { + // First, set up a known good config + goodConfig := &Config{ + MaxSize: defaultCacheSize, + TTL: defaultExpiration, + } + onCacheConfigChanged("test-config", goodConfig) + + ctx := logtesting.TestContextWithLogger(t) + cacheBefore := Get(ctx) + if cacheBefore == nil { + t.Fatal("Expected cache before invalid config change") + } + ttlBefore := cacheBefore.TTL() + maxSizeBefore := cacheBefore.MaxSize() + + // Test that onCacheConfigChanged handles invalid types gracefully + // This should not panic and should preserve the existing cache + onCacheConfigChanged("test-config", "invalid-type") + + // Verify we can still get the cache and it wasn't modified + cacheAfter := Get(ctx) + if cacheAfter == nil { + t.Fatal("Expected cache after invalid config change but got nil") + } + + // Verify cache config wasn't changed by invalid input + if cacheAfter.TTL() != ttlBefore { + t.Errorf("Expected TTL to remain %v after invalid config, got %v", ttlBefore, cacheAfter.TTL()) + } + + if cacheAfter.MaxSize() != maxSizeBefore { + t.Errorf("Expected 
MaxSize to remain %d after invalid config, got %d", maxSizeBefore, cacheAfter.MaxSize()) + } +} diff --git a/pkg/remoteresolution/resolver/framework/cache/operations.go b/pkg/remoteresolution/resolver/framework/cache/operations.go new file mode 100644 index 00000000000..f4b89ae8ed1 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/operations.go @@ -0,0 +1,128 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "slices" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" +) + +const ( + cacheModeAlways = "always" + cacheModeNever = "never" + cacheModeAuto = "auto" + CacheParam = "cache" + defaultCacheModeConfigMapKey = "default-cache-mode" +) + +// ImmutabilityChecker extends the base Resolver interface with cache-specific methods. +// Each resolver implements IsImmutable to define what "auto" mode means in their context. +type ImmutabilityChecker interface { + IsImmutable(params []v1.Param) bool +} + +// ShouldUse determines whether caching should be used based on: +// 1. Task/Pipeline cache parameter (highest priority) +// 2. ConfigMap default-cache-mode (middle priority) +// 3. 
System default for resolver type (lowest priority) +func ShouldUse( + ctx context.Context, + resolver ImmutabilityChecker, + params []v1.Param, + resolverType string, +) bool { + // Get cache mode from task parameter + cacheMode := "" + for _, param := range params { + if param.Name == CacheParam { + cacheMode = param.Value.StringVal + break + } + } + + // If no task parameter, get default from ConfigMap + if cacheMode == "" { + conf := resolutionframework.GetResolverConfigFromContext(ctx) + // This can be optionally set in individual resolver ConfigMaps (e.g., bundleresolver-config, + // git-resolver-config, cluster-resolver-config) to override the system default cache + // mode for that resolver. Valid values: "always", "never", "auto" + if defaultMode, ok := conf[defaultCacheModeConfigMapKey]; ok { + cacheMode = defaultMode + } + } + + // If still no mode, use system default + if cacheMode == "" { + cacheMode = cacheModeAuto + } + + switch cacheMode { + case cacheModeAlways: + return true + case cacheModeNever: + return false + case cacheModeAuto: + return resolver.IsImmutable(params) + default: + return resolver.IsImmutable(params) + } +} + +// Validate returns an error if the cache mode is not "always", "never", +// "auto", or empty string (which defaults to auto). 
+func Validate(cacheMode string) error { + // Empty string is valid - it will default to auto mode in ShouldUse + if cacheMode == "" { + return nil + } + + validCacheModes := []string{cacheModeAlways, cacheModeNever, cacheModeAuto} + if slices.Contains(validCacheModes, cacheMode) { + return nil + } + + return fmt.Errorf("invalid cache mode '%s', must be one of: %v (or empty for default)", cacheMode, validCacheModes) +} + +type resolveFn = func() (resolutionframework.ResolvedResource, error) + +func GetFromCacheOrResolve( + ctx context.Context, + params []v1.Param, + resolverType string, + resolve resolveFn, +) (resolutionframework.ResolvedResource, error) { + cacheInstance := Get(ctx) + + if cached, ok := cacheInstance.Get(resolverType, params); ok { + return cached, nil + } + + // If cache miss, resolve from params + resource, err := resolve() + if err != nil { + return nil, err + } + + // Store annotated resource with store operation and return annotated resource + // to indicate it was stored in cache + return cacheInstance.Add(resolverType, params, resource), nil +} diff --git a/pkg/remoteresolution/resolver/framework/cache/operations_test.go b/pkg/remoteresolution/resolver/framework/cache/operations_test.go new file mode 100644 index 00000000000..edcbb6b1da7 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/operations_test.go @@ -0,0 +1,317 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "errors" + "strings" + "testing" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + bundleresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle" + resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" +) + +type resolverFake struct{} + +func (r *resolverFake) IsImmutable(params []pipelinev1.Param) bool { + // Check if bundle parameter contains a digest (@sha256:) + for _, param := range params { + if param.Name == bundleresolution.ParamBundle { + bundleRef := param.Value.StringVal + return strings.Contains(bundleRef, "@sha256:") + } + } + return false +} + +func TestShouldUseCachePrecedence(t *testing.T) { + tests := []struct { + name string + taskCacheParam string // cache parameter from task/ResolutionRequest + configMap map[string]string // resolver ConfigMap + bundleRef string // bundle reference (affects auto mode) + expected bool // expected result + description string // test case description + }{ + // Test case 1: Default behavior (no config, no task param) -> should be "auto" + { + name: "no_config_no_task_param_with_digest", + taskCacheParam: "", // no cache param in task + configMap: map[string]string{}, // no default-cache-mode in ConfigMap + bundleRef: "registry.io/repo@sha256:abcdef", // has digest + expected: true, // auto mode + digest = cache + description: "No config anywhere, defaults to auto, digest should be cached", + }, + { + name: "no_config_no_task_param_with_tag", + taskCacheParam: "", // no cache param in task + configMap: map[string]string{}, // no default-cache-mode in ConfigMap + bundleRef: "registry.io/repo:latest", // no digest, just tag + expected: false, // auto mode + tag = no cache + description: "No config anywhere, defaults to auto, tag should not be cached", + }, + + // Test case 2: ConfigMap has setting, task has nothing -> should use ConfigMap value + { + name: 
"configmap_always_no_task_param", + taskCacheParam: "", // no cache param in task + configMap: map[string]string{"default-cache-mode": "always"}, // ConfigMap says always + bundleRef: "registry.io/repo:latest", // irrelevant for always mode + expected: true, // always = cache + description: "ConfigMap says always, no task param, should cache", + }, + { + name: "configmap_never_no_task_param", + taskCacheParam: "", // no cache param in task + configMap: map[string]string{"default-cache-mode": "never"}, // ConfigMap says never + bundleRef: "registry.io/repo@sha256:abcdef", // irrelevant for never mode + expected: false, // never = no cache + description: "ConfigMap says never, no task param, should not cache", + }, + { + name: "configmap_auto_no_task_param_with_digest", + taskCacheParam: "", // no cache param in task + configMap: map[string]string{"default-cache-mode": "auto"}, // ConfigMap says auto + bundleRef: "registry.io/repo@sha256:abcdef", // has digest + expected: true, // auto + digest = cache + description: "ConfigMap says auto, no task param, digest should be cached", + }, + + // Test case 3: ConfigMap has setting AND task has setting -> task should win + { + name: "configmap_always_task_never", + taskCacheParam: "never", // task says never + configMap: map[string]string{"default-cache-mode": "always"}, // ConfigMap says always + bundleRef: "registry.io/repo@sha256:abcdef", // irrelevant + expected: false, // task wins: never = no cache + description: "Task says never, ConfigMap says always, task should win", + }, + { + name: "configmap_never_task_always", + taskCacheParam: "always", // task says always + configMap: map[string]string{"default-cache-mode": "never"}, // ConfigMap says never + bundleRef: "registry.io/repo:latest", // irrelevant + expected: true, // task wins: always = cache + description: "Task says always, ConfigMap says never, task should win", + }, + { + name: "configmap_auto_task_always", + taskCacheParam: "always", // task says always + 
configMap: map[string]string{"default-cache-mode": "auto"}, // ConfigMap says auto + bundleRef: "registry.io/repo:latest", // would be false for auto mode + expected: true, // task wins: always = cache + description: "Task says always, ConfigMap says auto, task should win", + }, + { + name: "invalid_task_cache_param_falls_back_to_auto", + taskCacheParam: "invalid-value", // task has invalid cache value + configMap: map[string]string{}, // no ConfigMap setting + bundleRef: "registry.io/repo@sha256:abcdef", // has digest + expected: true, // invalid defaults to auto + digest = cache + description: "Invalid task cache parameter should fall back to auto mode", + }, + { + name: "invalid_configmap_value_falls_back_to_auto", + taskCacheParam: "", // no task param + configMap: map[string]string{"default-cache-mode": "wrong"}, // invalid ConfigMap value + bundleRef: "registry.io/repo@sha256:abcdef", // has digest + expected: true, // invalid defaults to auto + digest = cache + description: "Invalid ConfigMap cache mode should fall back to auto mode", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set up context with resolver config + ctx := t.Context() + if len(tt.configMap) > 0 { + ctx = resolutionframework.InjectResolverConfigToContext(ctx, tt.configMap) + } + + // Set up ResolutionRequestSpec + req := &v1beta1.ResolutionRequestSpec{ + Params: []pipelinev1.Param{ + { + Name: bundleresolution.ParamBundle, + Value: pipelinev1.ParamValue{StringVal: tt.bundleRef}, + }, + }, + } + if tt.taskCacheParam != "" { + req.Params = append(req.Params, pipelinev1.Param{ + Name: CacheParam, + Value: pipelinev1.ParamValue{StringVal: tt.taskCacheParam}, + }) + } + + // Test the framework function + result := ShouldUse(ctx, &resolverFake{}, req.Params, bundleresolution.LabelValueBundleResolverType) + + // Verify result + if result != tt.expected { + t.Errorf("ShouldUse() = %v, expected %v\nDescription: %s", result, tt.expected, tt.description) + } + }) + 
} +} + +func TestValidateCacheMode(t *testing.T) { + tests := []struct { + name string + cacheMode string + wantErr bool + }{ + { + name: "valid always mode", + cacheMode: "always", + wantErr: false, + }, + { + name: "valid never mode", + cacheMode: "never", + wantErr: false, + }, + { + name: "valid auto mode", + cacheMode: "auto", + wantErr: false, + }, + { + name: "invalid mode", + cacheMode: "invalid", + wantErr: true, + }, + { + name: "empty mode (defaults to auto)", + cacheMode: "", + wantErr: false, // Empty is valid - defaults to auto in ShouldUse + }, + { + name: "uppercase mode", + cacheMode: "ALWAYS", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := Validate(tt.cacheMode) + if (err != nil) != tt.wantErr { + t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestGetFromCacheOrResolve(t *testing.T) { + tests := []struct { + name string + params []pipelinev1.Param + resolverType string + cacheHit bool + resolveErr error + description string + }{ + { + name: "cache hit", + params: []pipelinev1.Param{ + { + Name: bundleresolution.ParamBundle, + Value: pipelinev1.ParamValue{StringVal: "registry.io/repo@sha256:abcdef"}, + }, + }, + resolverType: bundleresolution.LabelValueBundleResolverType, + cacheHit: true, + description: "Should return cached resource", + }, + { + name: "cache miss - successful resolve", + params: []pipelinev1.Param{ + { + Name: bundleresolution.ParamBundle, + Value: pipelinev1.ParamValue{StringVal: "registry.io/repo:latest"}, + }, + }, + resolverType: bundleresolution.LabelValueBundleResolverType, + cacheHit: false, + description: "Should resolve and add to cache", + }, + { + name: "cache miss - resolve error", + params: []pipelinev1.Param{ + { + Name: bundleresolution.ParamBundle, + Value: pipelinev1.ParamValue{StringVal: "registry.io/repo:error"}, + }, + }, + resolverType: bundleresolution.LabelValueBundleResolverType, + cacheHit: false, + 
resolveErr: errors.New("resolve error"), + description: "Should return error from resolver", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + + // Create mock resolver function + resolveCalled := false + resolveFn := func() (resolutionframework.ResolvedResource, error) { + resolveCalled = true + if tt.resolveErr != nil { + return nil, tt.resolveErr + } + return &mockResolvedResource{data: []byte("test data")}, nil + } + + // If this is a cache hit test, pre-populate the cache directly + if tt.cacheHit { + mockResource := &mockResolvedResource{data: []byte("test data")} + Get(ctx).Add(tt.resolverType, tt.params, mockResource) + } + + result, err := GetFromCacheOrResolve(ctx, tt.params, tt.resolverType, resolveFn) + + // Verify error handling + if tt.resolveErr != nil { + if err == nil { + t.Fatalf("Expected error but got none") + } + return + } + + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + if result == nil { + t.Fatalf("Expected result but got nil") + } + + // Verify cache behavior + if tt.cacheHit && resolveCalled { + t.Errorf("Expected cache hit but resolve function was called") + } + if !tt.cacheHit && !resolveCalled { + t.Errorf("Expected cache miss but resolve function was not called") + } + }) + } +} diff --git a/pkg/remoteresolution/resolver/framework/cache/setup.go b/pkg/remoteresolution/resolver/framework/cache/setup.go new file mode 100644 index 00000000000..7d57394e43d --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/setup.go @@ -0,0 +1,65 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "sync" + + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" + "knative.dev/pkg/logging" +) + +var ( + sharedCache *resolverCache + cacheInitOnce sync.Once +) + +type resolverCacheKey struct{} + +func init() { + injection.Default.RegisterClient(addCacheWithLoggerToCtx) +} + +func addCacheWithLoggerToCtx(ctx context.Context, _ *rest.Config) context.Context { + return context.WithValue(ctx, resolverCacheKey{}, createCacheOnce(ctx)) +} + +func createCacheOnce(ctx context.Context) *resolverCache { + cacheInitOnce.Do(func() { + cacheMu.Lock() + defer cacheMu.Unlock() + + sharedCache = newResolverCache(defaultCacheSize, defaultExpiration) + }) + + return sharedCache.withLogger( + logging.FromContext(ctx), + ) +} + +// Get extracts the ResolverCache from the context. +// If the cache is not available in the context (e.g., in tests), +// it falls back to the shared cache with a logger from the context. 
+func Get(ctx context.Context) *resolverCache { + if untyped := ctx.Value(resolverCacheKey{}); untyped != nil { + return untyped.(*resolverCache) + } + + return createCacheOnce(ctx) +} diff --git a/pkg/remoteresolution/resolver/framework/cache/setup_test.go b/pkg/remoteresolution/resolver/framework/cache/setup_test.go new file mode 100644 index 00000000000..f3fa3d10ac5 --- /dev/null +++ b/pkg/remoteresolution/resolver/framework/cache/setup_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "testing" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + _ "knative.dev/pkg/system/testing" // Setup system.Namespace() + + logtesting "knative.dev/pkg/logging/testing" +) + +func TestGetWithContextValue(t *testing.T) { + logger := logtesting.TestLogger(t) + ctx := t.Context() + + // Create a cache and inject it into context + testCache := newResolverCache(100, defaultExpiration).withLogger(logger) + ctx = context.WithValue(ctx, resolverCacheKey{}, testCache) + + // Get cache from context + resolverCache := Get(ctx) + if resolverCache == nil { + t.Error("Expected resolver cache but got nil") + } + + if resolverCache != testCache { + t.Error("Expected injected cache but got different instance") + } +} + +func TestCacheSharing(t *testing.T) { + // Create two different contexts + ctx1 := logtesting.TestContextWithLogger(t) + ctx2 := logtesting.TestContextWithLogger(t) + + // Get cache from first context + cache1 := Get(ctx1) + if cache1 == nil { + t.Fatal("Expected cache from ctx1") + } + + // Get cache from second context + cache2 := Get(ctx2) + if cache2 == nil { + t.Fatal("Expected cache from ctx2") + } + + // Verify they share the same underlying cache storage by adding data + // to cache1 and checking it appears in cache2 + testParams := []pipelinev1.Param{ + {Name: "url", Value: pipelinev1.ParamValue{Type: pipelinev1.ParamTypeString, StringVal: "https://example.com"}}, + } + testResource := &mockResolvedResource{data: []byte("test-data")} + + // Add to cache1 + cache1.Add("test-resolver", testParams, testResource) + + // Verify it exists in cache2 (proving they share the same underlying storage) + retrieved, found := cache2.Get("test-resolver", testParams) + if !found { + t.Fatal("Expected to find resource in cache2 that was added to cache1 - caches are not shared") + } + + if string(retrieved.Data()) != string(testResource.Data()) { + t.Errorf("Expected data %q, got %q", 
string(testResource.Data()), string(retrieved.Data())) + } +} diff --git a/pkg/remoteresolution/resolver/framework/controller.go b/pkg/remoteresolution/resolver/framework/controller.go index 665ecd89e93..f33517e2c6c 100644 --- a/pkg/remoteresolution/resolver/framework/controller.go +++ b/pkg/remoteresolution/resolver/framework/controller.go @@ -22,6 +22,7 @@ import ( rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client" rrinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest" + rrcache "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" framework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" "k8s.io/client-go/tools/cache" "k8s.io/utils/clock" @@ -62,6 +63,7 @@ func NewController(ctx context.Context, resolver Resolver, modifiers ...Reconcil } watchConfigChanges(ctx, r, cmw) + watchCacheConfigChanges(ctx, r, cmw) // TODO(sbwsg): Do better sanitize. resolverName := resolver.GetName(ctx) @@ -110,6 +112,19 @@ func watchConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configm } } +func watchCacheConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) { + logger := logging.FromContext(ctx) + cacheInstance := rrcache.Get(ctx) + cacheConfigName := cacheInstance.GetConfigName(ctx) + if cacheConfigName == "" { + logger.Error("failed to setup cache config watcher, cache returned empty config name") + return + } + + cacheConfigStore := rrcache.NewCacheConfigStore(cacheConfigName, logger) + cacheConfigStore.WatchConfigs(cmw) +} + // applyModifiersAndDefaults applies the given modifiers to // a reconciler and, after doing so, sets any default values for things // that weren't set by a modifier. 
diff --git a/pkg/remoteresolution/resolver/framework/reconciler.go b/pkg/remoteresolution/resolver/framework/reconciler.go index 4e35557fe47..70290939430 100644 --- a/pkg/remoteresolution/resolver/framework/reconciler.go +++ b/pkg/remoteresolution/resolver/framework/reconciler.go @@ -29,6 +29,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" rrv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1" + rrcache "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -123,6 +124,16 @@ func (r *Reconciler) resolve(ctx context.Context, key string, rr *v1beta1.Resolu paramsMap[p.Name] = p.Value.StringVal } + // Centralized cache parameter validation for all resolvers + if cacheMode, exists := paramsMap[rrcache.CacheParam]; exists && cacheMode != "" { + if err := rrcache.Validate(cacheMode); err != nil { + return &resolutioncommon.InvalidRequestError{ + ResolutionRequestKey: key, + Message: err.Error(), + } + } + } + timeoutDuration := defaultMaximumResolutionDuration if timed, ok := r.resolver.(framework.TimedResolution); ok { var err error diff --git a/pkg/remoteresolution/resolver/framework/reconciler_test.go b/pkg/remoteresolution/resolver/framework/reconciler_test.go index 1a437ddfc89..c7526ee4958 100644 --- a/pkg/remoteresolution/resolver/framework/reconciler_test.go +++ b/pkg/remoteresolution/resolver/framework/reconciler_test.go @@ -35,6 +35,7 @@ import ( "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -352,6 +353,13 @@ 
func TestReconcile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { d := test.Data{ ResolutionRequests: []*v1beta1.ResolutionRequest{tc.inputRequest}, + ConfigMaps: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }}, } fakeResolver := &framework.FakeResolver{ForParam: tc.paramMap} diff --git a/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go b/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go index eefee4263da..2399ac44f33 100644 --- a/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go +++ b/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go @@ -48,6 +48,9 @@ var ( now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC) testClock = testclock.NewFakePassiveClock(now) ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time") + ignoreCacheTimestamp = cmpopts.IgnoreMapEntries(func(k, v string) bool { + return strings.HasPrefix(k, "resolution.tekton.dev/cache") + }) ) // ResolverReconcileTestModifier is a function thaat will be invoked after the test assets and controller have been created @@ -88,7 +91,7 @@ func RunResolverReconcileTest(ctx context.Context, t *testing.T, d test.Data, re t.Fatalf("getting updated ResolutionRequest: %v", err) } if expectedStatus != nil { - if d := cmp.Diff(*expectedStatus, reconciledRR.Status, ignoreLastTransitionTime); d != "" { + if d := cmp.Diff(*expectedStatus, reconciledRR.Status, ignoreLastTransitionTime, ignoreCacheTimestamp); d != "" { t.Errorf("ResolutionRequest status doesn't match %s", diff.PrintWantGot(d)) if expectedStatus.Data != "" && expectedStatus.Data != reconciledRR.Status.Data { decodedExpectedData, err := base64.StdEncoding.Strict().DecodeString(expectedStatus.Data) @@ -154,10 +157,14 @@ func setClockOnReconciler(r *framework.Reconciler) { func ensureConfigurationConfigMapsExist(d *test.Data) { var 
featureFlagsExists bool + var resolverCacheConfigExists bool for _, cm := range d.ConfigMaps { if cm.Name == resolverconfig.GetFeatureFlagsConfigName() { featureFlagsExists = true } + if cm.Name == "resolver-cache-config" { + resolverCacheConfigExists = true + } } if !featureFlagsExists { d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ @@ -168,4 +175,13 @@ func ensureConfigurationConfigMapsExist(d *test.Data) { Data: map[string]string{}, }) } + if !resolverCacheConfigExists { + d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: resolverconfig.ResolversNamespace(system.Namespace()), + }, + Data: map[string]string{}, + }) + } } diff --git a/pkg/remoteresolution/resolver/git/resolver.go b/pkg/remoteresolution/resolver/git/resolver.go index 1d4890ebc6d..4e410f53b80 100644 --- a/pkg/remoteresolution/resolver/git/resolver.go +++ b/pkg/remoteresolution/resolver/git/resolver.go @@ -23,64 +23,70 @@ import ( "github.com/jenkins-x/go-scm/scm" "github.com/jenkins-x/go-scm/scm/factory" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework" + "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" "github.com/tektoncd/pipeline/pkg/resolution/resolver/git" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/util/cache" + k8scache "k8s.io/apimachinery/pkg/util/cache" "k8s.io/client-go/kubernetes" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/logging" ) const ( - disabledError = "cannot handle resolution request, enable-git-resolver feature flag not true" + disabledError = "cannot handle resolution request, enable-git-resolver feature flag not true" + gitResolverName = 
"Git" // labelValueGitResolverType is the value to use for the // resolution.tekton.dev/type label on resource requests - labelValueGitResolverType string = "git" + labelValueGitResolverType = "git" - // gitResolverName is the name that the git resolver should be - // associated with - gitResolverName string = "Git" - - // ConfigMapName is the git resolver's config map - ConfigMapName = "git-resolver-config" - - // cacheSize is the size of the LRU secrets cache + // size of the LRU secrets cache cacheSize = 1024 - // ttl is the time to live for a cache entry + // the time to live for a cache entry ttl = 5 * time.Minute + + // git revision parameter name + revisionParam = "revision" ) -var _ framework.Resolver = &Resolver{} +var _ framework.Resolver = (*Resolver)(nil) +var _ resolutionframework.ConfigWatcher = (*Resolver)(nil) +var _ cache.ImmutabilityChecker = (*Resolver)(nil) +var _ resolutionframework.TimedResolution = (*Resolver)(nil) // Resolver implements a framework.Resolver that can fetch files from git. type Resolver struct { kubeClient kubernetes.Interface logger *zap.SugaredLogger - cache *cache.LRUExpireCache + cache *k8scache.LRUExpireCache ttl time.Duration - // Used in testing + // Function for creating a SCM client so we can change it in tests. clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error) } -// Initialize performs any setup required by the gitresolver. +// Initialize performs any setup required by the git resolver. 
func (r *Resolver) Initialize(ctx context.Context) error { r.kubeClient = kubeclient.Get(ctx) - r.logger = logging.FromContext(ctx) - r.cache = cache.NewLRUExpireCache(cacheSize) - r.ttl = ttl + r.logger = logging.FromContext(ctx).Named(gitResolverName) + if r.cache == nil { + r.cache = k8scache.NewLRUExpireCache(cacheSize) + } + if r.ttl == 0 { + r.ttl = ttl + } if r.clientFunc == nil { r.clientFunc = factory.NewClient } return nil } -// GetName returns the string name that the gitresolver should be +// GetName returns the string name that the git resolver should be // associated with. func (r *Resolver) GetName(_ context.Context) string { return gitResolverName @@ -94,61 +100,42 @@ func (r *Resolver) GetSelector(_ context.Context) map[string]string { } } -// ValidateParams returns an error if the given parameter map is not +// GetConfigName returns the name of the git resolver's configmap. +func (r *Resolver) GetConfigName(_ context.Context) string { + return git.ConfigMapName +} + +// Validate returns an error if the given parameter map is not // valid for a resource request targeting the gitresolver. func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error { - if len(req.Params) > 0 { - return git.ValidateParams(ctx, req.Params) - } - // Remove this error once validate url has been implemented. - return errors.New("cannot validate request. the Validate method has not been implemented.") + return git.ValidateParams(ctx, req.Params) } -// Resolve performs the work of fetching a file from git given a map of -// parameters. 
-func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { - if len(req.Params) > 0 { - origParams := req.Params - - if git.IsDisabled(ctx) { - return nil, errors.New(disabledError) - } - - params, err := git.PopulateDefaultParams(ctx, origParams) - if err != nil { - return nil, err - } - - g := &git.GitResolver{ - KubeClient: r.kubeClient, - Logger: r.logger, - Cache: r.cache, - TTL: r.ttl, - Params: params, +// IsImmutable implements ImmutabilityChecker.IsImmutable +// Returns true if the revision parameter is a commit SHA (40-character hex string) +func (r *Resolver) IsImmutable(params []v1.Param) bool { + var revision string + for _, param := range params { + if param.Name == revisionParam { + revision = param.Value.StringVal + break } + } - if params[git.UrlParam] != "" { - return g.ResolveGitClone(ctx) + // Checks if the given string looks like a git commit SHA. + // A valid commit SHA is exactly 40 characters of hexadecimal. + if len(revision) != 40 { + return false + } + for _, r := range revision { + if !((r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')) { + return false } - - return g.ResolveAPIGit(ctx, r.clientFunc) } - // Remove this error once resolution of url has been implemented. - return nil, errors.New("the Resolve method has not been implemented.") + return true } -var _ resolutionframework.ConfigWatcher = &Resolver{} - -// GetConfigName returns the name of the git resolver's configmap. -func (r *Resolver) GetConfigName(context.Context) string { - return ConfigMapName -} - -var _ resolutionframework.TimedResolution = &Resolver{} - -// GetResolutionTimeout returns a time.Duration for the amount of time a -// single git fetch may take. This can be configured with the -// fetch-timeout field in the git-resolver-config configmap. +// GetResolutionTimeout returns the configured timeout for git resolution requests. 
func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) { conf, err := git.GetScmConfigForParamConfigKey(ctx, params) if err != nil { @@ -163,3 +150,48 @@ func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time } return defaultTimeout, nil } + +// Resolve performs the work of fetching a file from git given a map of +// parameters. +func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { + if len(req.Params) == 0 { + return nil, errors.New("no params") + } + + if git.IsDisabled(ctx) { + return nil, errors.New(disabledError) + } + + params, err := git.PopulateDefaultParams(ctx, req.Params) + if err != nil { + return nil, err + } + + if cache.ShouldUse(ctx, r, req.Params, labelValueGitResolverType) { + return cache.GetFromCacheOrResolve( + ctx, + req.Params, + labelValueGitResolverType, + func() (resolutionframework.ResolvedResource, error) { + return r.resolveViaGit(ctx, params) + }, + ) + } + return r.resolveViaGit(ctx, params) +} + +func (r *Resolver) resolveViaGit(ctx context.Context, params map[string]string) (resolutionframework.ResolvedResource, error) { + g := &git.GitResolver{ + KubeClient: r.kubeClient, + Logger: r.logger, + Cache: r.cache, + TTL: r.ttl, + Params: params, + } + + if params[git.UrlParam] != "" { + return g.ResolveGitClone(ctx) + } + + return g.ResolveAPIGit(ctx, r.clientFunc) +} diff --git a/pkg/remoteresolution/resolver/git/resolver_test.go b/pkg/remoteresolution/resolver/git/resolver_test.go index c47934c0815..db8fcceba74 100644 --- a/pkg/remoteresolution/resolver/git/resolver_test.go +++ b/pkg/remoteresolution/resolver/git/resolver_test.go @@ -42,6 +42,7 @@ import ( common "github.com/tektoncd/pipeline/pkg/resolution/common" resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework" frameworktesting 
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing" + "github.com/tektoncd/pipeline/pkg/resolution/resolver/git" gitresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/git" "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" @@ -665,7 +666,7 @@ func TestResolve(t *testing.T) { d := test.Data{ ConfigMaps: []*corev1.ConfigMap{{ ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName, + Name: git.ConfigMapName, Namespace: resolverconfig.ResolversNamespace(system.Namespace()), }, Data: cfg, @@ -677,6 +678,12 @@ func TestResolve(t *testing.T) { Data: map[string]string{ "enable-git-resolver": "true", }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: resolverconfig.ResolversNamespace(system.Namespace()), + }, + Data: map[string]string{}, }}, ResolutionRequests: []*v1beta1.ResolutionRequest{request}, } diff --git a/pkg/remoteresolution/resolver/http/resolver.go b/pkg/remoteresolution/resolver/http/resolver.go index ec106586d9c..f8ec38c7a96 100644 --- a/pkg/remoteresolution/resolver/http/resolver.go +++ b/pkg/remoteresolution/resolver/http/resolver.go @@ -31,24 +31,16 @@ import ( const ( // LabelValueHttpResolverType is the value to use for the // resolution.tekton.dev/type label on resource requests - LabelValueHttpResolverType string = "http" - - disabledError = "cannot handle resolution request, enable-http-resolver feature flag not true" - - // httpResolverName The name of the resolver - httpResolverName = "Http" - - // configMapName is the http resolver's config map - configMapName = "http-resolver-config" - - // default Timeout value when fetching http resources in seconds - defaultHttpTimeoutValue = "1m" - - // default key in the HTTP password secret - defaultBasicAuthSecretKey = "password" + LabelValueHttpResolverType = "http" + disabledError = "cannot handle resolution request, enable-http-resolver feature flag not true" + httpResolverName = "Http" + configMapName = 
"http-resolver-config" + defaultHttpTimeoutValue = "1m" + defaultBasicAuthSecretKey = "password" // default key in the HTTP password secret ) -var _ framework.Resolver = &Resolver{} +var _ framework.Resolver = (*Resolver)(nil) +var _ resolutionframework.ConfigWatcher = (*Resolver)(nil) // Resolver implements a framework.Resolver that can fetch files from an HTTP URL type Resolver struct { @@ -63,17 +55,17 @@ func (r *Resolver) Initialize(ctx context.Context) error { } // GetName returns a string name to refer to this resolver by. -func (r *Resolver) GetName(context.Context) string { +func (r *Resolver) GetName(_ context.Context) string { return httpResolverName } // GetConfigName returns the name of the http resolver's configmap. -func (r *Resolver) GetConfigName(context.Context) string { +func (r *Resolver) GetConfigName(_ context.Context) string { return configMapName } // GetSelector returns a map of labels to match requests to this resolver. -func (r *Resolver) GetSelector(context.Context) map[string]string { +func (r *Resolver) GetSelector(_ context.Context) map[string]string { return map[string]string{ common.LabelKeyResolverType: LabelValueHttpResolverType, } @@ -81,28 +73,19 @@ func (r *Resolver) GetSelector(context.Context) map[string]string { // Validate ensures parameters from a request are as expected. func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error { - if len(req.Params) > 0 { - return http.ValidateParams(ctx, req.Params) - } - // Remove this error once validate url has been implemented. - return errors.New("cannot validate request. the Validate method has not been implemented.") + return http.ValidateParams(ctx, req.Params) } // Resolve uses the given params to resolve the requested file or resource. 
func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { - if len(req.Params) > 0 { - oParams := req.Params - if http.IsDisabled(ctx) { - return nil, errors.New(disabledError) - } - - params, err := http.PopulateDefaultParams(ctx, oParams) - if err != nil { - return nil, err - } + if http.IsDisabled(ctx) { + return nil, errors.New(disabledError) + } - return http.FetchHttpResource(ctx, params, r.kubeClient, r.logger) + params, err := http.PopulateDefaultParams(ctx, req.Params) + if err != nil { + return nil, err } - // Remove this error once resolution of url has been implemented. - return nil, errors.New("the Resolve method has not been implemented.") + + return http.FetchHttpResource(ctx, params, r.kubeClient, r.logger) } diff --git a/pkg/remoteresolution/resolver/http/resolver_test.go b/pkg/remoteresolution/resolver/http/resolver_test.go index 463a3a775fc..e89d13777fb 100644 --- a/pkg/remoteresolution/resolver/http/resolver_test.go +++ b/pkg/remoteresolution/resolver/http/resolver_test.go @@ -151,6 +151,16 @@ func TestResolve(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + + resolver := Resolver{} + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + // Now overlay the config context + ctx = contextWithConfig(defaultHttpTimeoutValue) + svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if tc.expectedStatus != 0 { w.WriteHeader(tc.expectedStatus) @@ -169,11 +179,11 @@ func TestResolve(t *testing.T) { Value: *pipelinev1.NewStructuredValues("bar"), }) } - resolver := Resolver{} + req := v1beta1.ResolutionRequestSpec{ Params: params, } - output, err := resolver.Resolve(contextWithConfig(defaultHttpTimeoutValue), &req) + output, err := resolver.Resolve(ctx, &req) if tc.expectedErr != "" { re := 
regexp.MustCompile(tc.expectedErr) if !re.MatchString(err.Error()) { @@ -205,20 +215,30 @@ func TestResolve(t *testing.T) { } func TestResolveNotEnabled(t *testing.T) { + ctx, _ := ttesting.SetupFakeContext(t) + var err error resolver := Resolver{} + + if err := resolver.Initialize(ctx); err != nil { + t.Fatalf("failed to initialize resolver: %v", err) + } + + // Now overlay the disabled context + ctx = resolverDisabledContext() + someParams := map[string]string{"foo": "bar"} req := v1beta1.ResolutionRequestSpec{ Params: toParams(someParams), } - _, err = resolver.Resolve(resolverDisabledContext(), &req) + _, err = resolver.Resolve(ctx, &req) if err == nil { t.Fatalf("expected disabled err") } if d := cmp.Diff(disabledError, err.Error()); d != "" { t.Errorf("unexpected error: %s", diff.PrintWantGot(d)) } - err = resolver.Validate(resolverDisabledContext(), &v1beta1.ResolutionRequestSpec{Params: toParams(someParams)}) + err = resolver.Validate(ctx, &v1beta1.ResolutionRequestSpec{Params: toParams(someParams)}) if err == nil { t.Fatalf("expected disabled err") } @@ -354,6 +374,12 @@ func TestResolverReconcileBasicAuth(t *testing.T) { Data: map[string]string{ "enable-http-resolver": "true", }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: resolverconfig.ResolversNamespace(system.Namespace()), + }, + Data: map[string]string{}, }}, ResolutionRequests: []*v1beta1.ResolutionRequest{request}, } diff --git a/pkg/remoteresolution/resolver/hub/resolver.go b/pkg/remoteresolution/resolver/hub/resolver.go index 8c29b23e50d..4342357404b 100644 --- a/pkg/remoteresolution/resolver/hub/resolver.go +++ b/pkg/remoteresolution/resolver/hub/resolver.go @@ -15,7 +15,6 @@ package hub import ( "context" - "errors" "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework" @@ -27,16 +26,19 @@ import ( const ( // LabelValueHubResolverType is the value to use for the // 
resolution.tekton.dev/type label on resource requests - LabelValueHubResolverType string = "hub" + LabelValueHubResolverType = "hub" + hubResolverName = "Hub" + configMapName = "hubresolver-config" // ArtifactHubType is the value to use setting the type field to artifact - ArtifactHubType string = "artifact" + ArtifactHubType = "artifact" // TektonHubType is the value to use setting the type field to tekton - TektonHubType string = "tekton" + TektonHubType = "tekton" ) -var _ framework.Resolver = &Resolver{} +var _ framework.Resolver = (*Resolver)(nil) +var _ resolutionframework.ConfigWatcher = (*Resolver)(nil) // Resolver implements a framework.Resolver that can fetch files from OCI bundles. type Resolver struct { @@ -47,22 +49,22 @@ type Resolver struct { } // Initialize sets up any dependencies needed by the resolver. None atm. -func (r *Resolver) Initialize(context.Context) error { +func (r *Resolver) Initialize(_ context.Context) error { return nil } // GetName returns a string name to refer to this resolver by. -func (r *Resolver) GetName(context.Context) string { - return "Hub" +func (r *Resolver) GetName(_ context.Context) string { + return hubResolverName } // GetConfigName returns the name of the bundle resolver's configmap. -func (r *Resolver) GetConfigName(context.Context) string { - return "hubresolver-config" +func (r *Resolver) GetConfigName(_ context.Context) string { + return configMapName } // GetSelector returns a map of labels to match requests to this resolver. -func (r *Resolver) GetSelector(context.Context) map[string]string { +func (r *Resolver) GetSelector(_ context.Context) map[string]string { return map[string]string{ common.LabelKeyResolverType: LabelValueHubResolverType, } @@ -70,18 +72,10 @@ func (r *Resolver) GetSelector(context.Context) map[string]string { // Validate ensures parameters from a request are as expected. 
func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error { - if len(req.Params) > 0 { - return hub.ValidateParams(ctx, req.Params, r.TektonHubURL) - } - // Remove this error once validate url has been implemented. - return errors.New("cannot validate request. the Validate method has not been implemented.") + return hub.ValidateParams(ctx, req.Params, r.TektonHubURL) } // Resolve uses the given params to resolve the requested file or resource. func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) { - if len(req.Params) > 0 { - return hub.Resolve(ctx, req.Params, r.TektonHubURL, r.ArtifactHubURL) - } - // Remove this error once resolution of url has been implemented. - return nil, errors.New("the Resolve method has not been implemented.") + return hub.Resolve(ctx, req.Params, r.TektonHubURL, r.ArtifactHubURL) } diff --git a/pkg/resolution/resolver/bundle/bundle.go b/pkg/resolution/resolver/bundle/bundle.go index 8174db2fbfd..f99f3088b69 100644 --- a/pkg/resolution/resolver/bundle/bundle.go +++ b/pkg/resolution/resolver/bundle/bundle.go @@ -42,6 +42,7 @@ type RequestOptions struct { Bundle string EntryName string Kind string + Cache string } // ResolvedResource wraps the content of a matched entry in a bundle. diff --git a/pkg/resolution/resolver/bundle/params.go b/pkg/resolution/resolver/bundle/params.go index 2712cbe4c09..63c906c28e8 100644 --- a/pkg/resolution/resolver/bundle/params.go +++ b/pkg/resolution/resolver/bundle/params.go @@ -43,6 +43,9 @@ const ParamName = resource.ParamName // image is. const ParamKind = "kind" +// paramCache is the parameter defining whether to use cache for bundle requests. +const paramCache = "cache" + // OptionsFromParams parses the params from a resolution request and // converts them into options to pass as part of a bundle request. 
func OptionsFromParams(ctx context.Context, params []pipelinev1.Param) (RequestOptions, error) { @@ -97,5 +100,12 @@ func OptionsFromParams(ctx context.Context, params []pipelinev1.Param) (RequestO opts.EntryName = nameVal.StringVal opts.Kind = kind + // Use default cache mode since validation happens centrally in framework + if cacheVal, ok := paramsMap[paramCache]; ok && cacheVal.StringVal != "" { + opts.Cache = cacheVal.StringVal + } else { + opts.Cache = "auto" + } + return opts, nil } diff --git a/pkg/resolution/resolver/cluster/resolver.go b/pkg/resolution/resolver/cluster/resolver.go index bc9d5cd3e30..113bae2840b 100644 --- a/pkg/resolution/resolver/cluster/resolver.go +++ b/pkg/resolution/resolver/cluster/resolver.go @@ -47,13 +47,14 @@ const ( // associated with ClusterResolverName string = "Cluster" - configMapName = "cluster-resolver-config" + ConfigMapName = "cluster-resolver-config" ) -var _ framework.Resolver = &Resolver{} - var supportedKinds = []string{"task", "pipeline", "stepaction"} +var _ framework.Resolver = (*Resolver)(nil) +var _ framework.ConfigWatcher = (*Resolver)(nil) + // Resolver implements a framework.Resolver that can fetch resources from other namespaces. // // Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster.Resolver] instead. @@ -81,6 +82,11 @@ func (r *Resolver) GetSelector(_ context.Context) map[string]string { } } +// GetConfigName returns the name of the cluster resolver's configmap. +func (r *Resolver) GetConfigName(context.Context) string { + return ConfigMapName +} + // ValidateParams returns an error if the given parameter map is not // valid for a resource request targeting the cluster resolver. 
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error { @@ -158,13 +164,6 @@ func ResolveFromParams(ctx context.Context, origParams []pipelinev1.Param, pipel }, nil } -var _ framework.ConfigWatcher = &Resolver{} - -// GetConfigName returns the name of the cluster resolver's configmap. -func (r *Resolver) GetConfigName(context.Context) string { - return configMapName -} - // ResolvedClusterResource implements framework.ResolvedResource and returns // the resolved file []byte data and an annotation map for any metadata. type ResolvedClusterResource struct { diff --git a/pkg/resolution/resolver/framework/configstore.go b/pkg/resolution/resolver/framework/configstore.go index 5d9af9cd5e8..7f5d6649932 100644 --- a/pkg/resolution/resolver/framework/configstore.go +++ b/pkg/resolution/resolver/framework/configstore.go @@ -24,7 +24,7 @@ import ( "knative.dev/pkg/configmap" ) -// resolverConfigKey is the contenxt key associated with configuration +// resolverConfigKey is the context key associated with configuration // for one specific resolver, and is only used if that resolver // implements the optional framework.ConfigWatcher interface. 
var resolverConfigKey = struct{}{} diff --git a/pkg/resolution/resolver/framework/testing/fakecontroller.go b/pkg/resolution/resolver/framework/testing/fakecontroller.go index 4ddc3a8c95c..dee898d0a1c 100644 --- a/pkg/resolution/resolver/framework/testing/fakecontroller.go +++ b/pkg/resolution/resolver/framework/testing/fakecontroller.go @@ -154,10 +154,14 @@ func setClockOnReconciler(r *framework.Reconciler) { func ensureConfigurationConfigMapsExist(d *test.Data) { var featureFlagsExists bool + var resolverCacheConfigExists bool for _, cm := range d.ConfigMaps { if cm.Name == resolverconfig.GetFeatureFlagsConfigName() { featureFlagsExists = true } + if cm.Name == "resolver-cache-config" { + resolverCacheConfigExists = true + } } if !featureFlagsExists { d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ @@ -168,4 +172,13 @@ func ensureConfigurationConfigMapsExist(d *test.Data) { Data: map[string]string{}, }) } + if !resolverCacheConfigExists { + d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "resolver-cache-config", + Namespace: resolverconfig.ResolversNamespace(system.Namespace()), + }, + Data: map[string]string{}, + }) + } } diff --git a/pkg/resolution/resolver/git/repository.go b/pkg/resolution/resolver/git/repository.go index b6525974ce8..0305185bf7a 100644 --- a/pkg/resolution/resolver/git/repository.go +++ b/pkg/resolution/resolver/git/repository.go @@ -102,7 +102,7 @@ func (repo *repository) execGit(ctx context.Context, subCmd string, args ...stri args = append([]string{subCmd}, args...) 
- // We need to configure which directory contains the cloned repository since `cd`ing + // We need to configure which directory contains the cloned repository since `cd`ing // into the repository directory is not concurrency-safe configArgs := []string{"-C", repo.directory} @@ -118,6 +118,7 @@ func (repo *repository) execGit(ctx context.Context, subCmd string, args ...stri ) configArgs = append(configArgs, "--config-env", "http.extraHeader=GIT_AUTH_HEADER") } + cmd := repo.executor(ctx, "git", append(configArgs, args...)...) cmd.Env = append(cmd.Environ(), env...) diff --git a/pkg/resolution/resolver/git/repository_test.go b/pkg/resolution/resolver/git/repository_test.go index 21c0cf8a1fb..18900c07235 100644 --- a/pkg/resolution/resolver/git/repository_test.go +++ b/pkg/resolution/resolver/git/repository_test.go @@ -69,7 +69,7 @@ func TestClone(t *testing.T) { expectedEnv := []string{"GIT_TERMINAL_PROMPT=false"} expectedCmd := []string{"git", "-C", repo.directory} if test.username != "" { - token := base64.URLEncoding.EncodeToString([]byte(test.username + ":" + test.password)) + token := base64.StdEncoding.EncodeToString([]byte(test.username + ":" + test.password)) expectedCmd = append(expectedCmd, "--config-env", "http.extraHeader=GIT_AUTH_HEADER") expectedEnv = append(expectedEnv, "GIT_AUTH_HEADER=Authorization: Basic "+token) } diff --git a/pkg/resolution/resolver/git/resolver.go b/pkg/resolution/resolver/git/resolver.go index 33e290c457c..4e98902d2ff 100644 --- a/pkg/resolution/resolver/git/resolver.go +++ b/pkg/resolution/resolver/git/resolver.go @@ -142,8 +142,7 @@ func ValidateParams(ctx context.Context, params []pipelinev1.Param) error { return errors.New(disabledError) } - _, err := PopulateDefaultParams(ctx, params) - if err != nil { + if _, err := PopulateDefaultParams(ctx, params); err != nil { return err } return nil @@ -159,14 +158,22 @@ func validateRepoURL(url string) bool { } type GitResolver struct { - Params map[string]string + KubeClient 
kubernetes.Interface Logger *zap.SugaredLogger Cache *cache.LRUExpireCache TTL time.Duration - KubeClient kubernetes.Interface + Params map[string]string + + // Function variables for mocking in tests + ResolveGitCloneFunc func(ctx context.Context) (framework.ResolvedResource, error) + ResolveAPIGitFunc func(ctx context.Context, clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)) (framework.ResolvedResource, error) } +// ResolveGitClone resolves a git resource using git clone. func (g *GitResolver) ResolveGitClone(ctx context.Context) (framework.ResolvedResource, error) { + if g.ResolveGitCloneFunc != nil { + return g.ResolveGitCloneFunc(ctx) + } conf, err := GetScmConfigForParamConfigKey(ctx, g.Params) if err != nil { return nil, err @@ -242,6 +249,72 @@ func (g *GitResolver) ResolveGitClone(ctx context.Context) (framework.ResolvedRe }, nil } +// ResolveAPIGit resolves a git resource using the SCM API. +func (g *GitResolver) ResolveAPIGit(ctx context.Context, clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)) (framework.ResolvedResource, error) { + if g.ResolveAPIGitFunc != nil { + return g.ResolveAPIGitFunc(ctx, clientFunc) + } + // If we got here, the "repo" param was specified, so use the API approach + scmType, serverURL, err := getSCMTypeAndServerURL(ctx, g.Params) + if err != nil { + return nil, err + } + secretRef := &secretCacheKey{ + name: g.Params[TokenParam], + key: g.Params[TokenKeyParam], + } + if secretRef.name != "" { + if secretRef.key == "" { + secretRef.key = DefaultTokenKeyParam + } + secretRef.ns = common.RequestNamespace(ctx) + } else { + secretRef = nil + } + apiToken, err := g.getAPIToken(ctx, secretRef, APISecretNameKey) + if err != nil { + return nil, err + } + scmClient, err := clientFunc(scmType, serverURL, string(apiToken)) + if err != nil { + return nil, fmt.Errorf("failed to create SCM client: %w", err) + } + + orgRepo := fmt.Sprintf("%s/%s", 
g.Params[OrgParam], g.Params[RepoParam]) + path := g.Params[PathParam] + ref := g.Params[RevisionParam] + + // fetch the actual content from a file in the repo + content, _, err := scmClient.Contents.Find(ctx, orgRepo, path, ref) + if err != nil { + return nil, fmt.Errorf("couldn't fetch resource content: %w", err) + } + if content == nil || len(content.Data) == 0 { + return nil, fmt.Errorf("no content for resource in %s %s", orgRepo, path) + } + + // find the actual git commit sha by the ref + commit, _, err := scmClient.Git.FindCommit(ctx, orgRepo, ref) + if err != nil || commit == nil { + return nil, fmt.Errorf("couldn't fetch the commit sha for the ref %s in the repo: %w", ref, err) + } + + // fetch the repository URL + repo, _, err := scmClient.Repositories.Find(ctx, orgRepo) + if err != nil { + return nil, fmt.Errorf("couldn't fetch repository: %w", err) + } + + return &resolvedGitResource{ + Content: content.Data, + Revision: commit.Sha, + Org: g.Params[OrgParam], + Repo: g.Params[RepoParam], + Path: content.Path, + URL: repo.Clone, + }, nil +} + var _ framework.ConfigWatcher = &Resolver{} // GetConfigName returns the name of the git resolver's configmap. 
@@ -393,68 +466,6 @@ type secretCacheKey struct { key string } -func (g *GitResolver) ResolveAPIGit(ctx context.Context, clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)) (framework.ResolvedResource, error) { - // If we got here, the "repo" param was specified, so use the API approach - scmType, serverURL, err := getSCMTypeAndServerURL(ctx, g.Params) - if err != nil { - return nil, err - } - secretRef := &secretCacheKey{ - name: g.Params[TokenParam], - key: g.Params[TokenKeyParam], - } - if secretRef.name != "" { - if secretRef.key == "" { - secretRef.key = DefaultTokenKeyParam - } - secretRef.ns = common.RequestNamespace(ctx) - } else { - secretRef = nil - } - apiToken, err := g.getAPIToken(ctx, secretRef, APISecretNameKey) - if err != nil { - return nil, err - } - scmClient, err := clientFunc(scmType, serverURL, string(apiToken)) - if err != nil { - return nil, fmt.Errorf("failed to create SCM client: %w", err) - } - - orgRepo := fmt.Sprintf("%s/%s", g.Params[OrgParam], g.Params[RepoParam]) - path := g.Params[PathParam] - ref := g.Params[RevisionParam] - - // fetch the actual content from a file in the repo - content, _, err := scmClient.Contents.Find(ctx, orgRepo, path, ref) - if err != nil { - return nil, fmt.Errorf("couldn't fetch resource content: %w", err) - } - if content == nil || len(content.Data) == 0 { - return nil, fmt.Errorf("no content for resource in %s %s", orgRepo, path) - } - - // find the actual git commit sha by the ref - commit, _, err := scmClient.Git.FindCommit(ctx, orgRepo, ref) - if err != nil || commit == nil { - return nil, fmt.Errorf("couldn't fetch the commit sha for the ref %s in the repo: %w", ref, err) - } - - // fetch the repository URL - repo, _, err := scmClient.Repositories.Find(ctx, orgRepo) - if err != nil { - return nil, fmt.Errorf("couldn't fetch repository: %w", err) - } - - return &resolvedGitResource{ - Content: content.Data, - Revision: commit.Sha, - Org: g.Params[OrgParam], - 
Repo: g.Params[RepoParam], - Path: content.Path, - URL: repo.Clone, - }, nil -} - func (g *GitResolver) getAPIToken(ctx context.Context, apiSecret *secretCacheKey, key string) ([]byte, error) { conf, err := GetScmConfigForParamConfigKey(ctx, g.Params) if err != nil { diff --git a/pkg/spire/test/ca.go b/pkg/spire/test/ca.go index ae39d8f67a2..e0e3b6e2733 100644 --- a/pkg/spire/test/ca.go +++ b/pkg/spire/test/ca.go @@ -310,4 +310,4 @@ func (ca *CA) chain(includeRoot bool) []*x509.Certificate { next = next.parent } return chain -} \ No newline at end of file +} diff --git a/pkg/spire/test/fakebundleendpoint/server.go b/pkg/spire/test/fakebundleendpoint/server.go index 4454467e2b3..66a520570ea 100644 --- a/pkg/spire/test/fakebundleendpoint/server.go +++ b/pkg/spire/test/fakebundleendpoint/server.go @@ -81,7 +81,7 @@ func New(tb testing.TB, option ...ServerOption) *Server { func (s *Server) Shutdown() { err := s.httpServer.Shutdown(context.Background()) - if err!=nil { + if err != nil { s.tb.Errorf("unexpected error: %v", err) } s.wg.Wait() @@ -110,8 +110,8 @@ func (s *Server) start() error { s.wg.Add(1) go func() { err := s.httpServer.ServeTLS(ln, "", "") - if err != nil || err.Error()!=http.ErrServerClosed.Error(){ - s.tb.Errorf("expected error %q, got %v",http.ErrServerClosed.Error(),err) + if err != nil || err.Error() != http.ErrServerClosed.Error() { + s.tb.Errorf("expected error %q, got %v", http.ErrServerClosed.Error(), err) } s.wg.Done() ln.Close() @@ -128,16 +128,16 @@ func (s *Server) testbundle(w http.ResponseWriter, r *http.Request) { bb, err := s.bundles[0].Marshal() if err != nil { s.tb.Errorf("unexpected error: %v", err) - } + } s.bundles = s.bundles[1:] w.Header().Add("Content-Type", "application/json") b, err := w.Write(bb) if err != nil { s.tb.Errorf("unexpected error: %v", err) - } + } if len(bb) != b { s.tb.Errorf("expected written bytes %d, got %d", len(bb), b) - } + } } type serverOption func(*Server) diff --git a/pkg/spire/test/keys.go 
b/pkg/spire/test/keys.go index c69fe7d7b56..8f2bfb52e81 100644 --- a/pkg/spire/test/keys.go +++ b/pkg/spire/test/keys.go @@ -31,8 +31,8 @@ import ( func NewEC256Key(tb testing.TB) *ecdsa.PrivateKey { key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { - tb.Fatalf("failed to marshal private key: %v", err) - } + tb.Fatalf("failed to marshal private key: %v", err) + } return key } @@ -41,8 +41,8 @@ func NewKeyID(tb testing.TB) string { choices := make([]byte, 32) _, err := rand.Read(choices) if err != nil { - tb.Fatalf("failed to marshal private key: %v", err) - } + tb.Fatalf("failed to marshal private key: %v", err) + } return keyIDFromBytes(choices) } @@ -53,4 +53,4 @@ func keyIDFromBytes(choices []byte) string { builder.WriteByte(alphabet[int(choice)%len(alphabet)]) } return builder.String() -} \ No newline at end of file +} diff --git a/test/clients.go b/test/clients.go index efc7d2b6fb6..568bbd0010c 100644 --- a/test/clients.go +++ b/test/clients.go @@ -48,6 +48,7 @@ import ( "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" resolutionversioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1" + resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1" apixclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/kubernetes" knativetest "knative.dev/pkg/test" @@ -70,6 +71,7 @@ type clients struct { V1TaskRunClient v1.TaskRunInterface V1PipelineRunClient v1.PipelineRunInterface V1beta1StepActionClient v1beta1.StepActionInterface + V1beta1ResolutionRequestclient resolutionv1beta1.ResolutionRequestInterface } // newClients instantiates and returns several clientsets required for making requests to the @@ -117,5 +119,6 @@ func newClients(t *testing.T, configPath, clusterName, 
namespace string) *client c.V1TaskRunClient = cs.TektonV1().TaskRuns(namespace) c.V1PipelineRunClient = cs.TektonV1().PipelineRuns(namespace) c.V1beta1StepActionClient = cs.TektonV1beta1().StepActions(namespace) + c.V1beta1ResolutionRequestclient = rrcs.ResolutionV1beta1().ResolutionRequests(namespace) return c } diff --git a/test/dag_test.go b/test/dag_test.go index c7060cc3fc3..6e252bee764 100644 --- a/test/dag_test.go +++ b/test/dag_test.go @@ -35,8 +35,6 @@ import ( "knative.dev/pkg/test/helpers" ) -const sleepDuration = 15 * time.Second - // TestDAGPipelineRun creates a graph of arbitrary Tasks, then looks at the corresponding // TaskRun start times to ensure they were run in the order intended, which is: // diff --git a/test/per_feature_flags_test.go b/test/per_feature_flags_test.go index d6dcc4c1207..d9a05c1c1fb 100644 --- a/test/per_feature_flags_test.go +++ b/test/per_feature_flags_test.go @@ -30,7 +30,6 @@ import ( "time" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" "github.com/tektoncd/pipeline/pkg/apis/config" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/test/parse" @@ -41,9 +40,8 @@ import ( ) const ( - sleepDuration = 15 * time.Second - enabled = "true" - disabled = "false" + enabled = "true" + disabled = "false" ) var ( @@ -53,9 +51,6 @@ var ( "alpha": alphaFeatureFlags, "beta": betaFeatureFlags, } - - ignorePipelineRunStatus = cmpopts.IgnoreFields(v1.PipelineRunStatusFields{}, "StartTime", "CompletionTime", "FinallyStartTime", "ChildReferences", "Provenance") - ignoreTaskRunStatus = cmpopts.IgnoreFields(v1.TaskRunStatusFields{}, "StartTime", "CompletionTime", "Provenance") ) // TestPerFeatureFlagOptInAlpha tests the behavior with all alpha Per-feature diff --git a/test/propagated_params_test.go b/test/propagated_params_test.go index 71d4467f11d..665a8f61833 100644 --- a/test/propagated_params_test.go +++ b/test/propagated_params_test.go @@ -25,18 +25,12 @@ import ( "testing" 
"github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/test/parse" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) -var ( - ignorePipelineRunStatus = cmpopts.IgnoreFields(v1.PipelineRunStatusFields{}, "StartTime", "CompletionTime", "FinallyStartTime", "ChildReferences") - ignoreTaskRunStatus = cmpopts.IgnoreFields(v1.TaskRunStatusFields{}, "StartTime", "CompletionTime") -) - func TestPropagatedParams(t *testing.T) { t.Parallel() diff --git a/test/resolver_cache_test.go b/test/resolver_cache_test.go new file mode 100644 index 00000000000..ff0ba85e14d --- /dev/null +++ b/test/resolver_cache_test.go @@ -0,0 +1,735 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2025 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "sync" + + resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" + "github.com/tektoncd/pipeline/test/parse" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/system" + knativetest "knative.dev/pkg/test" + "knative.dev/pkg/test/helpers" +) + +const ( + cacheAnnotationKey = "resolution.tekton.dev/cached" + cacheResolverTypeKey = "resolution.tekton.dev/cache-resolver-type" + cacheTimestampKey = "resolution.tekton.dev/cache-timestamp" + cacheValueTrue = "true" +) + +var cacheResolverFeatureFlags = requireAllGates(map[string]string{ + "enable-bundles-resolver": "true", + "enable-api-fields": "beta", +}) + +var cacheGitFeatureFlags = requireAllGates(map[string]string{ + "enable-git-resolver": "true", + "enable-api-fields": "beta", +}) + +// getResolverPodLogs gets logs from the tekton-resolvers pod +func getResolverPodLogs(ctx context.Context, t *testing.T, c *clients) string { + t.Helper() + + resolverNamespace := resolverconfig.ResolversNamespace(system.Namespace()) + + // List pods in the resolver namespace + pods, err := c.KubeClient.CoreV1().Pods(resolverNamespace).List(ctx, metav1.ListOptions{ + LabelSelector: "app.kubernetes.io/name=resolvers", + }) + if err != nil { + t.Fatalf("Failed to list resolver pods in namespace %s: %v", resolverNamespace, err) + } + + if len(pods.Items) == 0 { + t.Fatalf("No resolver pods found in namespace %s", resolverNamespace) + } + + // Get logs from the first resolver pod + pod := pods.Items[0] + req := c.KubeClient.CoreV1().Pods(resolverNamespace).GetLogs(pod.Name, &corev1.PodLogOptions{}) + logs, err := req.DoRaw(ctx) + if err != nil { + t.Fatalf("Failed to get logs from 
resolver pod %s: %v", pod.Name, err) + } + + return string(logs) +} + +// TestBundleResolverCache validates that bundle resolver caching works correctly +func TestBundleResolverCache(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + cache.Get(ctx).Clear() + + // Set up local bundle registry with different repositories for each task + taskName1 := helpers.ObjectNameForTest(t) + "-1" + taskName2 := helpers.ObjectNameForTest(t) + "-2" + taskName3 := helpers.ObjectNameForTest(t) + "-3" + repo1 := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/cachetest-" + helpers.ObjectNameForTest(t) + "-1" + repo2 := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/cachetest-" + helpers.ObjectNameForTest(t) + "-2" + repo3 := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/cachetest-" + helpers.ObjectNameForTest(t) + "-3" + + // Create different tasks for each test to ensure unique cache keys + task1 := parse.MustParseV1beta1Task(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + steps: + - name: hello + image: mirror.gcr.io/alpine + script: 'echo Hello from cache test 1' +`, taskName1, namespace)) + + task2 := parse.MustParseV1beta1Task(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + steps: + - name: hello + image: mirror.gcr.io/alpine + script: 'echo Hello from cache test 2' +`, taskName2, namespace)) + + task3 := parse.MustParseV1beta1Task(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + steps: + - name: hello + image: mirror.gcr.io/alpine + script: 'echo Hello from cache test 3' +`, taskName3, namespace)) + + // Set up the bundles in the local registry + setupBundle(ctx, t, c, namespace, repo1, task1, nil) + setupBundle(ctx, t, c, namespace, repo2, task2, nil) + setupBundle(ctx, t, c, namespace, repo3, task3, nil) + + // 
Test 1: First request should have cache annotations (it stores in cache with "always" mode) + tr1 := createBundleTaskRunLocal(t, namespace, "test-task-1", "always", repo1, taskName1) + createTaskRunAndWait(ctx, t, c, tr1) + + // Add a small delay to ensure ResolutionRequest status is fully updated + time.Sleep(2 * time.Second) + + // Get the resolved resource and verify it's cached (first request stores in cache with "always" mode) + resolutionRequest1 := getResolutionRequest(ctx, t, c, namespace, tr1.Name) + if !hasCacheAnnotation(resolutionRequest1.Status.Annotations) { + t.Errorf("First request should have cache annotations when using cache=always mode. Annotations: %v", resolutionRequest1.Status.Annotations) + } + + // Test 2: Second request with same parameters should be cached + tr2 := createBundleTaskRunLocal(t, namespace, "test-task-2", "always", repo1, taskName1) + createTaskRunAndWait(ctx, t, c, tr2) + + // Add a small delay to ensure ResolutionRequest status is fully updated + time.Sleep(2 * time.Second) + + // Verify it IS cached and has correct annotations + resolutionRequest2 := getResolutionRequest(ctx, t, c, namespace, tr2.Name) + verifyCacheAnnotations(t, resolutionRequest2.Status.Annotations, "bundles") + + // Verify resolver logs show cache behavior + logs := getResolverPodLogs(ctx, t, c) + + // Check for cache miss on first request (should see "Cache miss" followed by "Adding to cache") + if !strings.Contains(logs, "Cache miss") { + t.Error("Expected to find 'Cache miss' in resolver logs for first request") + } + if !strings.Contains(logs, "Adding to cache") { + t.Error("Expected to find 'Adding to cache' in resolver logs for first request") + } + + // Check for cache hit on second request + if !strings.Contains(logs, "Cache hit") { + t.Error("Expected to find 'Cache hit' in resolver logs for second request") + } + + // Test 3: Request with different parameters should not be cached + tr3 := createBundleTaskRunLocal(t, namespace, "test-task-3", 
"never", repo2, taskName2) + createTaskRunAndWait(ctx, t, c, tr3) + + resolutionRequest3 := getResolutionRequest(ctx, t, c, namespace, tr3.Name) + if hasCacheAnnotation(resolutionRequest3.Status.Annotations) { + t.Error("Request with cache=never should not be cached") + } +} + +// Helper functions +func createBundleTaskRun(t *testing.T, namespace, name, cacheMode string) *v1.TaskRun { + t.Helper() + return parse.MustParseV1TaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + params: + - name: url + value: "https://github.com/tektoncd/pipeline.git" + workspaces: + - name: output + emptyDir: {} + taskRef: + resolver: bundles + params: + - name: bundle + value: ghcr.io/tektoncd/catalog/upstream/tasks/git-clone@sha256:65e61544c5870c8828233406689d812391735fd4100cb444bbd81531cb958bb3 + - name: name + value: git-clone + - name: kind + value: task + - name: cache + value: %s +`, name, namespace, cacheMode)) +} + +func createBundleTaskRunLocal(t *testing.T, namespace, name, cacheMode, repo, taskName string) *v1.TaskRun { + t.Helper() + return parse.MustParseV1TaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + resolver: bundles + params: + - name: bundle + value: %s + - name: name + value: %s + - name: kind + value: task + - name: cache + value: %s +`, name, namespace, repo, taskName, cacheMode)) +} + +func createGitTaskRunWithCache(t *testing.T, namespace, name, revision, cacheMode string) *v1.TaskRun { + t.Helper() + return parse.MustParseV1TaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + workspaces: + - name: output + emptyDir: {} + taskRef: + resolver: git + params: + - name: url + value: https://github.com/tektoncd/catalog.git + - name: pathInRepo + value: /task/git-clone/0.10/git-clone.yaml + - name: revision + value: %s + - name: cache + value: %s + params: + - name: url + value: https://github.com/tektoncd/pipeline + - name: deleteExisting + value: "true" +`, name, namespace, revision, 
cacheMode)) +} + +func createClusterTaskRun(t *testing.T, namespace, name, taskName, cacheMode string) *v1.TaskRun { + t.Helper() + return parse.MustParseV1TaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + resolver: cluster + params: + - name: kind + value: task + - name: name + value: %s + - name: namespace + value: %s + - name: cache + value: %s +`, name, namespace, taskName, namespace, cacheMode)) +} + +// TestResolverCacheIsolation validates that cache keys are unique between resolvers +func TestResolverCacheIsolation(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags, cacheGitFeatureFlags, clusterFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Create a Task in the namespace for testing cluster resolver + taskName := helpers.ObjectNameForTest(t) + exampleTask := parse.MustParseV1Task(t, fmt.Sprintf(` +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: %s + namespace: %s +spec: + steps: + - name: echo + image: mirror.gcr.io/ubuntu + script: | + #!/usr/bin/env bash + echo "Hello from cache isolation test" +`, taskName, namespace)) + + _, err := c.V1TaskClient.Create(ctx, exampleTask, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create Task `%s`: %s", taskName, err) + } + + // Test bundle resolver cache + tr1 := createBundleTaskRun(t, namespace, "isolation-bundle-1", "always") + createTaskRunAndWait(ctx, t, c, tr1) + + // Test git resolver cache + tr2 := createGitTaskRunWithCache(t, namespace, "isolation-git-1", "dd7cc22f2965ff4c9d8855b7161c2ffe94b6153e", "always") + createTaskRunAndWait(ctx, t, c, tr2) + + // Test cluster resolver cache + tr3 := createClusterTaskRun(t, namespace, "isolation-cluster-1", taskName, "always") + createTaskRunAndWait(ctx, t, c, tr3) + + // Verify each resolver has its own cache entry + 
resolutionRequests, err := c.V1beta1ResolutionRequestclient.List(ctx, metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to list ResolutionRequests: %s", err) + } + + bundleCacheFound := false + gitCacheFound := false + clusterCacheFound := false + + for _, req := range resolutionRequests.Items { + if req.Namespace == namespace && req.Status.Data != "" && req.Status.Annotations != nil { + switch req.Status.Annotations[cacheResolverTypeKey] { + case "bundles": + bundleCacheFound = true + case "git": + gitCacheFound = true + case "cluster": + clusterCacheFound = true + } + } + } + + if !bundleCacheFound { + t.Error("Bundle resolver cache entry not found") + } + if !gitCacheFound { + t.Error("Git resolver cache entry not found") + } + if !clusterCacheFound { + t.Error("Cluster resolver cache entry not found") + } + + t.Logf("Cache isolation verified: Bundle=%v, Git=%v, Cluster=%v", bundleCacheFound, gitCacheFound, clusterCacheFound) +} + +// TestResolverCacheComprehensive validates all cache configuration modes across resolvers +func TestResolverCacheComprehensive(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags, cacheGitFeatureFlags, clusterFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Create a Task in the namespace for testing cluster resolver + taskName := helpers.ObjectNameForTest(t) + exampleTask := parse.MustParseV1Task(t, fmt.Sprintf(` +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: %s + namespace: %s +spec: + steps: + - name: echo + image: mirror.gcr.io/ubuntu + script: | + #!/usr/bin/env bash + echo "Hello from comprehensive cache config test" +`, taskName, namespace)) + + _, err := c.V1TaskClient.Create(ctx, exampleTask, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create Task `%s`: %s", taskName, err) + } + + testCases := []struct { + name 
string + resolver string + cacheMode string + shouldCache bool + description string + }{ + // Bundle resolver tests + {"bundle-always", "bundle", "always", true, "Bundle resolver should cache with always"}, + {"bundle-never", "bundle", "never", false, "Bundle resolver should not cache with never"}, + {"bundle-auto", "bundle", "auto", true, "Bundle resolver should cache with auto (has digest)"}, + {"bundle-default", "bundle", "", true, "Bundle resolver should cache with default (auto with digest)"}, + + // Git resolver tests + {"git-always", "git", "always", true, "Git resolver should cache with always"}, + {"git-never", "git", "never", false, "Git resolver should not cache with never"}, + {"git-auto-commit", "git", "auto", true, "Git resolver should cache with auto and commit hash"}, + {"git-auto-branch", "git", "auto", false, "Git resolver should not cache with auto and branch"}, + + // Cluster resolver tests + {"cluster-always", "cluster", "always", true, "Cluster resolver should cache with always"}, + {"cluster-never", "cluster", "never", false, "Cluster resolver should not cache with never"}, + {"cluster-auto", "cluster", "auto", false, "Cluster resolver should not cache with auto"}, + {"cluster-default", "cluster", "", false, "Cluster resolver should not cache with default"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var tr *v1.TaskRun + + switch tc.resolver { + case "bundle": + tr = createBundleTaskRun(t, namespace, "config-test-"+tc.name, tc.cacheMode) + case "git": + // Use commit hash for auto mode, branch for others + revision := "dd7cc22f2965ff4c9d8855b7161c2ffe94b6153e" + if tc.cacheMode == "auto" && tc.shouldCache == false { + revision = "main" // Use branch name for auto mode that shouldn't cache + } + tr = createGitTaskRunWithCache(t, namespace, "config-test-"+tc.name, revision, tc.cacheMode) + case "cluster": + tr = createClusterTaskRun(t, namespace, "config-test-"+tc.name, taskName, tc.cacheMode) + } + + _, err 
:= c.V1TaskRunClient.Create(ctx, tr, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess", v1Version); err != nil { + t.Fatalf("Error waiting for TaskRun to finish: %s", err) + } + + resolutionRequest := getResolutionRequest(ctx, t, c, namespace, tr.Name) + isCached := hasCacheAnnotation(resolutionRequest.Status.Annotations) + + if isCached != tc.shouldCache { + t.Errorf("%s: expected cache=%v, got cache=%v", tc.description, tc.shouldCache, isCached) + } + }) + } +} + +// TestCacheErrorHandling validates cache error handling scenarios +func TestResolverCacheErrorHandling(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Test with invalid cache mode (should fail with error due to centralized validation) + tr1 := createBundleTaskRun(t, namespace, "error-test-invalid", "invalid") + _, err := c.V1TaskRunClient.Create(ctx, tr1, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create TaskRun with invalid cache mode: %s", err) + } + + // Should fail due to invalid cache parameter validation + if err := WaitForTaskRunState(ctx, c, tr1.Name, TaskRunFailed(tr1.Name), "TaskRunFailed", v1Version); err != nil { + t.Fatalf("Error waiting for TaskRun to fail: %s", err) + } + + // Verify it failed due to invalid cache mode + resolutionRequest1 := getResolutionRequest(ctx, t, c, namespace, tr1.Name) + if resolutionRequest1.Status.Conditions[0].Status != "False" { + t.Error("TaskRun with invalid cache mode should fail resolution") + } + + // Test with empty cache parameter (should default to auto) + tr2 := createBundleTaskRun(t, namespace, "error-test-empty", "") + _, err = c.V1TaskRunClient.Create(ctx, tr2, metav1.CreateOptions{}) 
+ if err != nil { + t.Fatalf("Failed to create TaskRun with empty cache mode: %s", err) + } + + if err := WaitForTaskRunState(ctx, c, tr2.Name, TaskRunSucceed(tr2.Name), "TaskRunSuccess", v1Version); err != nil { + t.Fatalf("Error waiting for TaskRun to finish: %s", err) + } + + // Should still work and cache (defaults to auto mode with digest) + resolutionRequest2 := getResolutionRequest(ctx, t, c, namespace, tr2.Name) + if !hasCacheAnnotation(resolutionRequest2.Status.Annotations) { + t.Error("TaskRun with empty cache mode should still cache (defaults to auto)") + } +} + +// TestCacheTTLExpiration validates cache TTL behavior +func TestResolverCacheTTL(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // First request to populate cache + tr1 := createBundleTaskRun(t, namespace, "ttl-test-1", "always") + _, err := c.V1TaskRunClient.Create(ctx, tr1, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create first TaskRun: %s", err) + } + + if err := WaitForTaskRunState(ctx, c, tr1.Name, TaskRunSucceed(tr1.Name), "TaskRunSuccess", v1Version); err != nil { + t.Fatalf("Error waiting for first TaskRun to finish: %s", err) + } + + // Second request should hit cache + tr2 := createBundleTaskRun(t, namespace, "ttl-test-2", "always") + _, err = c.V1TaskRunClient.Create(ctx, tr2, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create second TaskRun: %s", err) + } + + if err := WaitForTaskRunState(ctx, c, tr2.Name, TaskRunSucceed(tr2.Name), "TaskRunSuccess", v1Version); err != nil { + t.Fatalf("Error waiting for second TaskRun to finish: %s", err) + } + + resolutionRequest2 := getResolutionRequest(ctx, t, c, namespace, tr2.Name) + if !hasCacheAnnotation(resolutionRequest2.Status.Annotations) { + t.Error("Second request should be cached") + } + + 
// Note: We can't easily test TTL expiration in e2e tests without waiting for the full TTL duration + // This test validates that cache entries are created and retrieved correctly + // TTL expiration would need to be tested in unit tests with mocked time + t.Logf("Cache TTL test completed - cache entries created and retrieved successfully") +} + +// TestCacheStressTest validates cache behavior under stress conditions +func TestResolverCacheStress(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Create multiple concurrent requests to test cache behavior under load + const numRequests = 5 + var wg sync.WaitGroup + errors := make(chan error, numRequests) + + for i := range numRequests { + wg.Add(1) + go func(index int) { + defer wg.Done() + + tr := createBundleTaskRun(t, namespace, fmt.Sprintf("stress-test-%d", index), "always") + _, err := c.V1TaskRunClient.Create(ctx, tr, metav1.CreateOptions{}) + if err != nil { + errors <- fmt.Errorf("Failed to create TaskRun %d: %w", index, err) + return + } + + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess", v1Version); err != nil { + errors <- fmt.Errorf("Error waiting for TaskRun %d to finish: %w", index, err) + return + } + + resolutionRequest := getResolutionRequest(ctx, t, c, namespace, tr.Name) + if !hasCacheAnnotation(resolutionRequest.Status.Annotations) { + errors <- fmt.Errorf("TaskRun %d should be cached", index) + return + } + }(i) + } + + wg.Wait() + close(errors) + + // Check for any errors + for err := range errors { + t.Errorf("Stress test error: %v", err) + } + + t.Logf("Cache stress test completed successfully with %d concurrent requests", numRequests) +} + +// TestResolverCacheInvalidParams validates centralized cache parameter validation +func 
TestResolverCacheInvalidParams(t *testing.T) { + ctx := t.Context() + c, namespace := setup(ctx, t, withRegistry, cacheResolverFeatureFlags) + + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Set up local bundle registry + taskName := helpers.ObjectNameForTest(t) + repo := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/cachetest-invalid-" + helpers.ObjectNameForTest(t) + + // Create a task for the test + task := parse.MustParseV1beta1Task(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + steps: + - name: hello + image: mirror.gcr.io/alpine + script: 'echo Hello from invalid cache param test' +`, taskName, namespace)) + + _, err := c.V1beta1TaskClient.Create(ctx, task, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create Task `%s`: %s", taskName, err) + } + + setupBundle(ctx, t, c, namespace, repo, task, nil) + + // Test with malformed cache parameter (should fail due to centralized validation) + tr := createBundleTaskRunLocal(t, namespace, "invalid-params-test", "malformed-cache-value", repo, taskName) + _, err = c.V1TaskRunClient.Create(ctx, tr, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create TaskRun with malformed cache parameter: %s", err) + } + + // Should fail due to invalid cache parameter validation + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunFailed(tr.Name), "TaskRunFailed", v1Version); err != nil { + t.Fatalf("Error waiting for TaskRun to fail: %s", err) + } + + // Verify it failed due to invalid cache mode + resolutionRequest := getResolutionRequest(ctx, t, c, namespace, tr.Name) + if resolutionRequest.Status.Conditions[0].Status != "False" { + t.Error("TaskRun with malformed cache parameter should fail resolution") + } + + t.Logf("Cache invalid parameters test completed successfully") +} + +// getResolutionRequest gets the ResolutionRequest for a TaskRun +func getResolutionRequest(ctx 
context.Context, t *testing.T, c *clients, namespace, taskRunName string) *v1beta1.ResolutionRequest { + t.Helper() + + // List all ResolutionRequests in the namespace + resolutionRequests, err := c.V1beta1ResolutionRequestclient.List(ctx, metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to list ResolutionRequests: %v", err) + } + + // Find the ResolutionRequest that has this TaskRun as an owner + var mostRecent *v1beta1.ResolutionRequest + for _, rr := range resolutionRequests.Items { + // Check if this ResolutionRequest is owned by our TaskRun + for _, ownerRef := range rr.OwnerReferences { + if ownerRef.Kind == "TaskRun" && ownerRef.Name == taskRunName { + if mostRecent == nil || rr.CreationTimestamp.After(mostRecent.CreationTimestamp.Time) { + mostRecent = &rr + } + } + } + } + + if mostRecent == nil { + // No ResolutionRequest found - this might be expected for cache: never + return nil + } + + return mostRecent +} + +func hasCacheAnnotation(annotations map[string]string) bool { + if annotations == nil { + return false + } + cached, exists := annotations[cacheAnnotationKey] + return exists && cached == cacheValueTrue +} + +// verifyCacheAnnotations verifies that cache annotations are present and have correct values +func verifyCacheAnnotations(t *testing.T, annotations map[string]string, expectedResolverType string) { + t.Helper() + + if !hasCacheAnnotation(annotations) { + t.Error("Expected cache annotations but none found") + return + } + + if annotations[cacheResolverTypeKey] != expectedResolverType { + t.Errorf("Expected resolver type '%s', got '%s'", expectedResolverType, annotations[cacheResolverTypeKey]) + } + + if timestamp, exists := annotations[cacheTimestampKey]; !exists || timestamp == "" { + t.Errorf("Expected cache timestamp annotation, got: %v", annotations) + } +} + +// createTaskRunAndWait creates a TaskRun and waits for it to complete successfully +func createTaskRunAndWait(ctx context.Context, t *testing.T, c *clients, tr 
*v1.TaskRun) { + t.Helper() + + _, err := c.V1TaskRunClient.Create(ctx, tr, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create TaskRun %s: %s", tr.Name, err) + } + + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess", v1Version); err != nil { + t.Fatalf("Error waiting for TaskRun %s to finish: %s", tr.Name, err) + } +} diff --git a/test/resolvers_test.go b/test/resolvers_test.go index 206b8e3a1d8..b43ec717bb1 100644 --- a/test/resolvers_test.go +++ b/test/resolvers_test.go @@ -250,7 +250,7 @@ spec: func TestGitResolver_Clone_Failure(t *testing.T) { defaultURL := "https://github.com/tektoncd/catalog.git" defaultPathInRepo := "/task/git-clone/0.10/git-clone.yaml" - defaultCommit := "783b4fe7d21148f3b1a93bfa49b0024d8c6c2955" + defaultCommit := "dd7cc22f2965ff4c9d8855b7161c2ffe94b6153e" testCases := []struct { name string diff --git a/test/tektonbundles_test.go b/test/tektonbundles_test.go index 43d5c0f3cd3..aff5c42b61f 100644 --- a/test/tektonbundles_test.go +++ b/test/tektonbundles_test.go @@ -220,7 +220,8 @@ func publishImg(ctx context.Context, t *testing.T, c *clients, namespace string, } // Create a configmap to contain the tarball which we will mount in the pod. 
- cmName := namespace + "uploadimage-cm" + // Use a unique name based on the repository to avoid conflicts + cmName := namespace + "-" + strings.ReplaceAll(strings.ReplaceAll(ref.String(), "/", "-"), ":", "-") + "-uploadimage-cm" if _, err = c.KubeClient.CoreV1().ConfigMaps(namespace).Create(ctx, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: cmName}, BinaryData: map[string][]byte{ diff --git a/test/util.go b/test/util.go index 8acc203c981..849d1402ff4 100644 --- a/test/util.go +++ b/test/util.go @@ -27,11 +27,13 @@ import ( "strings" "sync" "testing" + "time" "github.com/google/go-cmp/cmp/cmpopts" "github.com/tektoncd/pipeline/pkg/apis/config" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/names" + "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/cache" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,14 +50,20 @@ import ( "sigs.k8s.io/yaml" ) +const ( + sleepDuration = 15 * time.Second +) + var ( - initMetrics sync.Once - ignoreTypeMeta = cmpopts.IgnoreFields(metav1.TypeMeta{}, "Kind", "APIVersion") - ignoreObjectMeta = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion", "UID", "CreationTimestamp", "Generation", "ManagedFields", "Labels", "Annotations", "OwnerReferences") - ignoreCondition = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time", "Message") - ignoreConditions = cmpopts.IgnoreFields(duckv1.Status{}, "Conditions") - ignoreStepState = cmpopts.IgnoreFields(v1.StepState{}, "ImageID", "TerminationReason", "Provenance") - ignoreContainerStates = cmpopts.IgnoreFields(corev1.ContainerState{}, "Terminated") + initMetrics sync.Once + ignoreTypeMeta = cmpopts.IgnoreFields(metav1.TypeMeta{}, "Kind", "APIVersion") + ignoreObjectMeta = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion", "UID", "CreationTimestamp", "Generation", "ManagedFields", "Labels", "Annotations", "OwnerReferences") + 
ignoreCondition = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time", "Message") + ignoreConditions = cmpopts.IgnoreFields(duckv1.Status{}, "Conditions") + ignoreStepState = cmpopts.IgnoreFields(v1.StepState{}, "ImageID", "TerminationReason", "Provenance") + ignoreContainerStates = cmpopts.IgnoreFields(corev1.ContainerState{}, "Terminated") + ignorePipelineRunStatus = cmpopts.IgnoreFields(v1.PipelineRunStatusFields{}, "StartTime", "CompletionTime", "FinallyStartTime", "ChildReferences", "Provenance") + ignoreTaskRunStatus = cmpopts.IgnoreFields(v1.TaskRunStatusFields{}, "StartTime", "CompletionTime", "Provenance") // ignoreSATaskRunSpec ignores the service account in the TaskRunSpec as it may differ across platforms ignoreSATaskRunSpec = cmpopts.IgnoreFields(v1.TaskRunSpec{}, "ServiceAccountName") // ignoreSAPipelineRunSpec ignores the service account in the PipelineRunSpec as it may differ across platforms @@ -66,6 +74,8 @@ func setup(ctx context.Context, t *testing.T, fn ...func(context.Context, *testi t.Helper() skipIfExcluded(t) + cache.Get(ctx).Clear() + namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arendelle") initializeLogsAndMetrics(t)