
Bump github.com/fluxcd/pkg/oci from 0.17.0 to 0.18.0 #5938

10 changes: 8 additions & 2 deletions cmd/apprepository-controller/server/controller.go
@@ -134,7 +134,7 @@ func NewController(

log.Info("Setting up event handlers")
// Set up an event handler for when AppRepository resources change
apprepoInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = apprepoInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
Contributor: Seems odd that we're not using the result (but then, weren't before the change either).

Contributor: Yep, I don't know why we weren't using it... and why the golangci-lint linter hadn't complained until this PR :S

AddFunc: controller.enqueueAppRepo,
UpdateFunc: func(oldObj, newObj interface{}) {
oldApp := oldObj.(*apprepov1alpha1.AppRepository)
@@ -150,6 +150,9 @@ func NewController(
}
},
})
if err != nil {
log.Warningf("Error adding AppRepository event handler: %v", err)
}

// Set up an event handler for when CronJob resources get deleted. This
// handler will lookup the owner of the given CronJob, and if it is owned by a
@@ -158,9 +161,12 @@ func NewController(
// to implement custom logic for handling CronJob resources. More info on this
// pattern:
// https://github.com/kubernetes/community/blob/8cafef897a22026d42f5e5bb3f104febe7e29830/contributors/devel/controllers.md
cronjobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
_, err = cronjobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: controller.handleObject,
})
if err != nil {
log.Warningf("Error adding CronJob event handler: %v", err)
}

return controller
}
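A note on the review thread above: since client-go v0.26, Informer().AddEventHandler returns a cache.ResourceEventHandlerRegistration handle alongside the error. The PR only checks the error, which is fine; purely as a hedged sketch (reusing apprepoInformer, controller and log from the diff — none of this is in the actual change), the handle could be used like this:

reg, err := apprepoInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: controller.enqueueAppRepo,
})
if err != nil {
	log.Warningf("Error adding AppRepository event handler: %v", err)
}

// the registration handle reports whether this handler has processed the initial list
if reg != nil && !reg.HasSynced() {
	log.Info("AppRepository event handler has not synced yet")
}

// and it allows the handler to be deregistered again, e.g. during shutdown
if reg != nil {
	if err := apprepoInformer.Informer().RemoveEventHandler(reg); err != nil {
		log.Warningf("Error removing AppRepository event handler: %v", err)
	}
}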
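handleObject itself is not part of this diff; the owner-lookup pattern described in the CronJob comment above usually reduces to inspecting the object's controller reference. A hedged sketch with a hypothetical helper (metav1 is k8s.io/apimachinery/pkg/apis/meta/v1):

// ownerAppRepoName returns the name of the owning AppRepository, if any.
func ownerAppRepoName(obj metav1.Object) (string, bool) {
	owner := metav1.GetControllerOf(obj)
	if owner == nil || owner.Kind != "AppRepository" {
		// not owned by an AppRepository, so this CronJob is ignored
		return "", false
	}
	return owner.Name, true
}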
@@ -265,7 +265,7 @@ func (c *ChartCache) processNextWorkItem(workerName string) bool {

// will clear out the cache of charts for a given repo except the charts specified by
// keepThese argument, which may be nil.
func (c *ChartCache) deleteChartsHelper(repo *types.NamespacedName, keepThese sets.String) error {
func (c *ChartCache) deleteChartsHelper(repo *types.NamespacedName, keepThese sets.Set[string]) error {
// need to get a list of all charts/versions for this repo that are either:
// a. already in the cache OR
// b. being processed
@@ -279,7 +279,7 @@ func (c *ChartCache) deleteChartsHelper(repo *types.NamespacedName, keepThese se
KeySegmentsSeparator,
repo.Name,
KeySegmentsSeparator)
redisKeysToDelete := sets.String{}
redisKeysToDelete := sets.Set[string]{}
// https://redis.io/commands/scan An iteration starts when the cursor is set to 0,
// and terminates when the cursor returned by the server is 0
cursor := uint64(0)
@@ -337,7 +337,7 @@ func (c *ChartCache) DeleteChartsForRepo(repo *types.NamespacedName) error {
log.Infof("+DeleteChartsForRepo(%s)", repo)
defer log.Infof("-DeleteChartsForRepo(%s)", repo)

return c.deleteChartsHelper(repo, sets.String{})
return c.deleteChartsHelper(repo, sets.Set[string]{})
}

// this function is called when re-importing charts after an update to the repo,
@@ -347,7 +347,7 @@ func (c *ChartCache) PurgeObsoleteChartVersions(keepThese []models.Chart) error
log.Infof("+PurgeObsoleteChartVersions()")
defer log.Infof("-PurgeObsoleteChartVersions")

repos := map[types.NamespacedName]sets.String{}
repos := map[types.NamespacedName]sets.Set[string]{}
for _, ch := range keepThese {
if ch.Repo == nil {
// shouldn't happen
@@ -360,7 +360,7 @@ func (c *ChartCache) PurgeObsoleteChartVersions(keepThese []models.Chart) error
}
a, ok := repos[n]
if a == nil || !ok {
a = sets.String{}
a = sets.Set[string]{}
}
for _, cv := range ch.ChartVersions {
if key, err := c.KeyFor(ch.Repo.Namespace, ch.ID, cv.Version); err != nil {
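The SCAN comment inside deleteChartsHelper above spells out the cursor contract; a minimal sketch of that loop with the go-redis client (redisCli, ctx, match and redisKeysToDelete are taken from the surrounding code, and the batch size of 10 is arbitrary):

cursor := uint64(0)
for {
	var keys []string
	var err error
	// each SCAN call returns one batch of matching keys plus the next cursor
	keys, cursor, err = redisCli.Scan(ctx, cursor, match, 10).Result()
	if err != nil {
		return err
	}
	redisKeysToDelete.Insert(keys...)
	// the server signals the end of the iteration by returning cursor 0
	if cursor == 0 {
		break
	}
}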
@@ -121,9 +121,9 @@ func newQueue(name string, verbose bool) *Type {
return &Type{
name: name,
verbose: verbose,
expected: sets.String{},
dirty: sets.String{},
processing: sets.String{},
expected: sets.Set[string]{},
dirty: sets.Set[string]{},
processing: sets.Set[string]{},
cond: sync.NewCond(&sync.Mutex{}),
}
}
@@ -147,16 +147,16 @@ type Type struct {
// in unit tests, where an item is added to the queue and then the test code
// needs to wait until it's been processed before taking further action
// Used in unit tests only
expected sets.String
expected sets.Set[string]

// dirty defines all of the items that need to be processed.
dirty sets.String
dirty sets.Set[string]

// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
processing sets.String
processing sets.Set[string]

cond *sync.Cond

@@ -379,18 +379,18 @@ func (q *Type) reset() {
defer q.cond.L.Unlock()

q.queue = []string{}
q.dirty = sets.String{}
q.processing = sets.String{}
q.dirty = sets.Set[string]{}
q.processing = sets.Set[string]{}
// we are intentionally not resetting q.expected as we don't want to lose
// those across resync's
}

// for easier reading of debug output
func (q *Type) prettyPrintAll() string {
return fmt.Sprintf("\n\texpected: %s\n\tdirty: %s\n\tprocessing: %s\n\tqueue: %s",
printOneItemPerLine(q.expected.List()),
printOneItemPerLine(q.dirty.List()),
printOneItemPerLine(q.processing.List()),
printOneItemPerLine(q.expected.UnsortedList()),
printOneItemPerLine(q.dirty.UnsortedList()),
printOneItemPerLine(q.processing.UnsortedList()),
printOneItemPerLine(q.queue))
}
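The recurring change in this file and the ones around it is the migration from the deprecated sets.String to the generic sets.Set[string] in k8s.io/apimachinery/pkg/util/sets, with the sorted s.List() method replaced by s.UnsortedList() (or the package-level sets.List(s) where ordering matters). A small self-contained sketch of the equivalent calls, assuming a recent apimachinery release:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.Set[string]{} replaces sets.String{}; sets.New[string](...) is the constructor form
	s := sets.New[string]("b", "a")
	s.Insert("c")
	fmt.Println(s.Has("a"))       // true
	fmt.Println(s.UnsortedList()) // elements in unspecified order, replaces s.List() here
	fmt.Println(sets.List(s))     // sorted slice, for callers that relied on List()'s ordering
}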

@@ -698,7 +698,7 @@ func (c *NamespacedResourceWatcherCache) fetch(key string) (interface{}, error)
// parallelize the process of value retrieval because fetch() calls
// c.config.onGet() which will de-code the data from bytes into expected struct, which
// may be computationally expensive and thus benefit from multiple threads of execution
func (c *NamespacedResourceWatcherCache) fetchMultiple(keys sets.String) (map[string]interface{}, error) {
func (c *NamespacedResourceWatcherCache) fetchMultiple(keys sets.Set[string]) (map[string]interface{}, error) {
response := make(map[string]interface{})

type fetchValueJob struct {
@@ -763,7 +763,7 @@ func (c *NamespacedResourceWatcherCache) fetchMultiple(keys sets.String) (map[st
// it's value will be returned,
// whereas 'fetchMultiple' does not guarantee that.
// The keys are expected to be in the format of the cache (the caller does that)
func (c *NamespacedResourceWatcherCache) GetMultiple(keys sets.String) (map[string]interface{}, error) {
func (c *NamespacedResourceWatcherCache) GetMultiple(keys sets.Set[string]) (map[string]interface{}, error) {
c.resyncCond.L.(*sync.RWMutex).RLock()
defer c.resyncCond.L.(*sync.RWMutex).RUnlock()

Expand All @@ -779,7 +779,7 @@ func (c *NamespacedResourceWatcherCache) GetMultiple(keys sets.String) (map[stri
}

// now, re-compute and fetch the ones that are left over from the previous operation
keysLeft := sets.String{}
keysLeft := sets.Set[string]{}

for key, value := range chartsUntyped {
if value == nil {
@@ -822,7 +822,7 @@ func (c *NamespacedResourceWatcherCache) populateWith(items []ctrlclient.Object)
return status.Errorf(codes.Internal, "Invalid state of the cache in populateWith()")
}

keys := sets.String{}
keys := sets.Set[string]{}
for _, item := range items {
if key, err := c.keyFor(item); err != nil {
return status.Errorf(codes.Internal, "%v", err)
@@ -836,7 +836,7 @@ func (c *NamespacedResourceWatcherCache) populateWith(items []ctrlclient.Object)
return nil
}

func (c *NamespacedResourceWatcherCache) computeValuesForKeys(keys sets.String) {
func (c *NamespacedResourceWatcherCache) computeValuesForKeys(keys sets.Set[string]) {
var wg sync.WaitGroup
numWorkers := int(math.Min(float64(len(keys)), float64(maxWorkers)))
requestChan := make(chan string, numWorkers)
@@ -866,7 +866,7 @@ func (c *NamespacedResourceWatcherCache) computeValuesForKeys(keys sets.String)
wg.Wait()
}

func (c *NamespacedResourceWatcherCache) computeAndFetchValuesForKeys(keys sets.String) (map[string]interface{}, error) {
func (c *NamespacedResourceWatcherCache) computeAndFetchValuesForKeys(keys sets.Set[string]) (map[string]interface{}, error) {
type computeValueJob struct {
key string
}
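The comments on fetchMultiple and computeValuesForKeys above describe the same bounded worker-pool shape: keys go onto a channel and a capped number of goroutines decode values in parallel. A hedged, self-contained sketch of that pattern (fetchOne stands in for the cache's onGet/fetch step and is hypothetical):

package main

import (
	"fmt"
	"math"
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

const maxWorkers = 10

func fetchAll(keys sets.Set[string], fetchOne func(string) string) map[string]string {
	results := make(map[string]string, len(keys))
	var mu sync.Mutex // guards results across workers
	var wg sync.WaitGroup

	numWorkers := int(math.Min(float64(len(keys)), float64(maxWorkers)))
	requestChan := make(chan string, numWorkers)

	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range requestChan {
				value := fetchOne(key) // the potentially expensive decode step
				mu.Lock()
				results[key] = value
				mu.Unlock()
			}
		}()
	}

	for key := range keys {
		requestChan <- key
	}
	close(requestChan)
	wg.Wait()
	return results
}

func main() {
	keys := sets.New[string]("a", "b", "c")
	fmt.Println(fetchAll(keys, func(k string) string { return "value-for-" + k }))
}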
@@ -74,7 +74,7 @@ func TestKindClusterGetAvailablePackageSummariesForLargeReposAndTinyRedis(t *tes
const MAX_REPOS_NEVER = 100
var totalRepos = 0
// ref https://stackoverflow.com/questions/32840687/timeout-for-waitgroup-wait
evictedRepos := sets.String{}
evictedRepos := sets.Set[string]{}

// do this part in a func so we can defer subscribe.Close
func() {
@@ -233,7 +233,7 @@ func TestKindClusterGetAvailablePackageSummariesForLargeReposAndTinyRedis(t *tes

// we need to make sure that response contains packages from all existing repositories
// regardless whether they're in the cache or not
expected := sets.String{}
expected := sets.Set[string]{}
for i := 0; i < totalRepos; i++ {
repo := fmt.Sprintf("bitnami-%d", i)
expected.Insert(repo)
@@ -245,7 +245,7 @@ func TestKindClusterGetAvailablePackageSummariesForLargeReposAndTinyRedis(t *tes

if expected.Len() != 0 {
t.Fatalf("Expected to get packages from these repositories: %s, but did not get any",
expected.List())
expected.UnsortedList())
}
}

@@ -1249,7 +1249,7 @@ func newRedisClientForIntegrationTest(t *testing.T) (*redis.Client, error) {
return redisCli, nil
}

func redisReceiveNotificationsLoop(t *testing.T, ch <-chan *redis.Message, sem *semaphore.Weighted, evictedRepos *sets.String) {
func redisReceiveNotificationsLoop(t *testing.T, ch <-chan *redis.Message, sem *semaphore.Weighted, evictedRepos *sets.Set[string]) {
if totalBitnamiCharts == -1 {
t.Errorf("Error: unexpected state: number of charts in bitnami catalog is not initialized")
t.Fail()
@@ -1258,7 +1258,7 @@ func redisReceiveNotificationsLoop(t *testing.T, ch <-chan *redis.Message, sem *
// this for loop running in the background will signal to the main goroutine
// when it is okay to proceed to load the next repo
t.Logf("Listening for events from redis in the background...")
reposAdded := sets.String{}
reposAdded := sets.Set[string]{}
var chartsLeftToSync = 0
for {
event, ok := <-ch
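The comment above describes what this background loop is for: consume Redis keyspace notifications and let the main test goroutine proceed to load the next repo. A hedged sketch of that signaling shape with golang.org/x/sync/semaphore (ch, sem, ctx and t are reused from the test; the payload check is illustrative, not the test's exact protocol):

// background goroutine: consume notifications and signal progress
go func() {
	for {
		event, ok := <-ch
		if !ok {
			return // subscription channel closed
		}
		// when an event indicates a repo finished syncing, unblock the producer
		if strings.Contains(event.Payload, "helmrepositories") {
			sem.Release(1)
		}
	}
}()

// main goroutine: wait until the background loop has signalled readiness
if err := sem.Acquire(ctx, 1); err != nil {
	t.Fatalf("%+v", err)
}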
11 changes: 6 additions & 5 deletions cmd/kubeapps-apis/plugins/fluxv2/packages/v1alpha1/repo.go
@@ -8,11 +8,12 @@ import (
"context"
"encoding/gob"
"fmt"
"github.com/vmware-tanzu/kubeapps/cmd/kubeapps-apis/plugins/pkg/k8sutils"
"regexp"
"strings"
"time"

"github.com/vmware-tanzu/kubeapps/cmd/kubeapps-apis/plugins/pkg/k8sutils"

fluxmeta "github.com/fluxcd/pkg/apis/meta"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
corev1 "github.com/vmware-tanzu/kubeapps/cmd/kubeapps-apis/gen/core/packages/v1alpha1"
@@ -69,11 +70,11 @@ func (s *Server) listReposInNamespace(ctx context.Context, ns string) ([]sourcev
return nil, statuserror.FromK8sError("list", "HelmRepository", "", err)
} else {
// filter out those repos the caller has no access to
namespaces := sets.String{}
namespaces := sets.Set[string]{}
for _, item := range repoList.Items {
namespaces.Insert(item.GetNamespace())
}
allowedNamespaces := sets.String{}
allowedNamespaces := sets.Set[string]{}
gvr := common.GetRepositoriesGvr()
for ns := range namespaces {
if ok, err := s.hasAccessToNamespace(ctx, gvr, ns); err == nil && ok {
@@ -110,12 +111,12 @@ func (s *Server) getRepoInCluster(ctx context.Context, key types.NamespacedName)
}

// regexp expressions are used for matching actual names against expected patterns
func (s *Server) filterReadyReposByName(repoList []sourcev1.HelmRepository, match []string) (sets.String, error) {
func (s *Server) filterReadyReposByName(repoList []sourcev1.HelmRepository, match []string) (sets.Set[string], error) {
if s.repoCache == nil {
return nil, status.Errorf(codes.FailedPrecondition, "server cache has not been properly initialized")
}

resultKeys := sets.String{}
resultKeys := sets.Set[string]{}
for r := range repoList {
repo := repoList[r] // avoid implicit memory aliasing
// first check if repo is in ready state
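hasAccessToNamespace is not shown in this diff; the per-namespace filtering above is typically backed by a SelfSubjectAccessReview-style check. A hedged, generic sketch of such a check (clientset and the "list" verb are assumptions, not necessarily what the plugin does):

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
)

// canList reports whether the current identity may list the given resource in ns.
func canList(ctx context.Context, clientset kubernetes.Interface, gvr schema.GroupVersionResource, ns string) (bool, error) {
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: ns,
				Group:     gvr.Group,
				Resource:  gvr.Resource,
				Verb:      "list",
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}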
10 changes: 5 additions & 5 deletions cmd/kubeapps-apis/plugins/fluxv2/packages/v1alpha1/repo_test.go
@@ -1039,7 +1039,7 @@ func TestGetAvailablePackageSummariesAfterCacheResyncQueueNotIdle(t *testing.T)

// we need to make sure that response contains packages from all existing repositories
// regardless whether they're in the cache or not
expected := sets.String{}
expected := sets.Set[string]{}
for i := 0; i < len(repos); i++ {
repo := fmt.Sprintf("bitnami-%d", i)
expected.Insert(repo)
Expand All @@ -1051,7 +1051,7 @@ func TestGetAvailablePackageSummariesAfterCacheResyncQueueNotIdle(t *testing.T)

if expected.Len() != 0 {
t.Fatalf("Expected to get packages from these repositories: %s, but did not get any",
expected.List())
expected.UnsortedList())
}

if err = mock.ExpectationsWereMet(); err != nil {
@@ -1152,7 +1152,7 @@ func TestGetAvailablePackageSummariesAfterCacheResyncQueueIdle(t *testing.T) {

// we need to make sure that response contains packages from all existing repositories
// regardless whether they're in the cache or not
expected := sets.String{}
expected := sets.Set[string]{}
expected.Insert(repoName)
for _, s := range resp.AvailablePackageSummaries {
id := strings.Split(s.AvailablePackageRef.Identifier, "/")
Expand All @@ -1161,7 +1161,7 @@ func TestGetAvailablePackageSummariesAfterCacheResyncQueueIdle(t *testing.T) {

if expected.Len() != 0 {
t.Fatalf("Expected to get packages from these repositories: %s, but did not get any",
expected.List())
expected.UnsortedList())
}

if err = mock.ExpectationsWereMet(); err != nil {
@@ -2515,7 +2515,7 @@ func newRepo(name string, namespace string, spec *sourcev1.HelmRepositorySpec, s
// does a series of mock.ExpectGet(...)
func (s *Server) redisMockExpectGetFromRepoCache(mock redismock.ClientMock, filterOptions *corev1.FilterOptions, repos ...sourcev1.HelmRepository) error {
mapVals := make(map[string][]byte)
ociRepoKeys := sets.String{}
ociRepoKeys := sets.Set[string]{}
for _, r := range repos {
key, bytes, err := s.redisKeyValueForRepo(r)
if err != nil {
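For readers unfamiliar with the mock helper referenced above: redismock pairs each expected command with a canned reply, so "a series of mock.ExpectGet(...)" looks roughly like this (keys are illustrative; bytes would come from redisKeyValueForRepo as in the loop above):

// one expectation per key the code under test will GET
mock.ExpectGet("helmrepositories:ns:repo-1").SetVal(string(bytes))
// a cache miss can be simulated with a redis.Nil reply
mock.ExpectGet("helmrepositories:ns:repo-2").RedisNil()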
@@ -445,8 +445,8 @@ func seedChartCacheWithCharts(t *testing.T,

var chartCache *cache.ChartCache
var err error
cachedChartKeys := sets.String{}
cachedChartIds := sets.String{}
cachedChartKeys := sets.Set[string]{}
cachedChartIds := sets.Set[string]{}

if charts != nil {
chartCache, err = cache.NewChartCache("chartCacheTest", redisCli, stopCh)
@@ -72,6 +72,10 @@ func (w *withWatchWrapper) List(ctx context.Context, list client.ObjectList, opt
}
}

func (w *withWatchWrapper) SubResource(subResource string) client.SubResourceClient {
return w.delegate.SubResource(subResource)
}

func (w *withWatchWrapper) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
return w.delegate.Delete(ctx, obj, opts...)
}