From daec15b956369e09420ce8b34d91b50fcb4c517b Mon Sep 17 00:00:00 2001 From: Martin Buhr Date: Wed, 7 Aug 2024 13:32:14 +1200 Subject: [PATCH 1/4] moved storage implementations into their own packages --- certs/manager.go | 11 +- certs/manager_test.go | 2 +- ctx/ctx.go | 4 +- gateway/analytics.go | 3 +- gateway/api.go | 23 +- gateway/api_definition.go | 4 +- gateway/api_healthcheck.go | 8 +- gateway/api_loader.go | 15 +- gateway/api_test.go | 28 +-- gateway/auth_manager.go | 23 +- gateway/auth_manager_test.go | 8 +- gateway/cert_test.go | 4 +- gateway/coprocess_api.go | 8 +- gateway/coprocess_id_extractor_test.go | 25 +-- gateway/delete_api_cache.go | 4 +- gateway/event_handler_webhooks.go | 7 +- gateway/gateway_test.go | 20 +- gateway/health_check.go | 4 +- gateway/host_checker_manager.go | 8 +- gateway/host_checker_manager_test.go | 14 +- gateway/host_checker_test.go | 4 +- gateway/ldap_auth_handler.go | 2 +- gateway/middleware.go | 8 +- gateway/mw_api_rate_limit.go | 8 +- gateway/mw_auth_key.go | 4 +- gateway/mw_auth_key_test.go | 4 +- gateway/mw_basic_auth.go | 6 +- gateway/mw_basic_auth_test.go | 4 +- gateway/mw_external_oauth.go | 6 +- gateway/mw_jwt.go | 4 +- gateway/mw_redis_cache.go | 4 +- gateway/oauth_manager.go | 20 +- gateway/oauth_manager_test.go | 7 +- gateway/redis_analytics_purger.go | 4 +- gateway/redis_logrus_hook.go | 5 +- gateway/redis_signals.go | 8 +- gateway/res_cache.go | 4 +- gateway/rpc_backup_handlers.go | 10 +- gateway/rpc_storage_handler.go | 202 +++++++++--------- gateway/rpc_storage_handler_test.go | 9 +- gateway/server.go | 38 ++-- gateway/session_manager.go | 9 +- gateway/testutil.go | 6 +- interfaces/storage.go | 38 ++++ internal/rate/sliding_log.go | 2 +- internal/rate/sliding_log_test.go | 16 +- rpc/rpc_analytics_purger.go | 5 +- rpc/synchronization_forcer.go | 11 +- rpc/synchronization_forcer_test.go | 6 +- storage/{ => dummy}/dummy.go | 2 +- storage/{ => dummy}/dummy_test.go | 2 +- storage/{ => mdcb}/mdcb_storage.go | 9 +- 
storage/{ => mdcb}/mdcb_storage_test.go | 7 +- .../{ => redis-cluster}/connection_handler.go | 2 +- .../connection_handler_test.go | 2 +- storage/{ => redis-cluster}/redis_cluster.go | 17 +- .../{ => redis-cluster}/redis_cluster_test.go | 49 ++--- storage/{ => redis-cluster}/redis_shim.go | 2 +- .../{ => redis-cluster}/redis_shim_test.go | 2 +- storage/shared/errors.go | 7 + storage/storage.go | 183 +++------------- storage/storage_test.go | 8 +- storage/util/util.go | 127 +++++++++++ 63 files changed, 590 insertions(+), 506 deletions(-) create mode 100644 interfaces/storage.go rename storage/{ => dummy}/dummy.go (99%) rename storage/{ => dummy}/dummy_test.go (99%) rename storage/{ => mdcb}/mdcb_storage.go (95%) rename storage/{ => mdcb}/mdcb_storage_test.go (92%) rename storage/{ => redis-cluster}/connection_handler.go (99%) rename storage/{ => redis-cluster}/connection_handler_test.go (99%) rename storage/{ => redis-cluster}/redis_cluster.go (98%) rename storage/{ => redis-cluster}/redis_cluster_test.go (98%) rename storage/{ => redis-cluster}/redis_shim.go (98%) rename storage/{ => redis-cluster}/redis_shim_test.go (98%) create mode 100644 storage/shared/errors.go create mode 100644 storage/util/util.go diff --git a/certs/manager.go b/certs/manager.go index e8585c36a1c..021b032e5df 100644 --- a/certs/manager.go +++ b/certs/manager.go @@ -18,9 +18,10 @@ import ( "github.com/sirupsen/logrus" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/cache" tykcrypto "github.com/TykTechnologies/tyk/internal/crypto" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/mdcb" ) const ( @@ -52,14 +53,14 @@ type CertificateManager interface { } type certificateManager struct { - storage storage.Handler + storage interfaces.Handler logger *logrus.Entry cache cache.Repository secret string migrateCertList bool } -func NewCertificateManager(storage storage.Handler, secret string, logger *logrus.Logger, 
migrateCertList bool) *certificateManager { +func NewCertificateManager(storage interfaces.Handler, secret string, logger *logrus.Logger, migrateCertList bool) *certificateManager { if logger == nil { logger = logrus.New() } @@ -79,7 +80,7 @@ func getOrgFromKeyID(key, certID string) string { return orgId } -func NewSlaveCertManager(localStorage, rpcStorage storage.Handler, secret string, logger *logrus.Logger, migrateCertList bool) *certificateManager { +func NewSlaveCertManager(localStorage, rpcStorage interfaces.Handler, secret string, logger *logrus.Logger, migrateCertList bool) *certificateManager { if logger == nil { logger = logrus.New() } @@ -101,7 +102,7 @@ func NewSlaveCertManager(localStorage, rpcStorage storage.Handler, secret string return err } - mdcbStorage := storage.NewMdcbStorage(localStorage, rpcStorage, log) + mdcbStorage := mdcb.NewMdcbStorage(localStorage, rpcStorage, log) mdcbStorage.CallbackonPullfromRPC = &callbackOnPullCertFromRPC cm.storage = mdcbStorage diff --git a/certs/manager_test.go b/certs/manager_test.go index fa6449ceff8..62d5253a062 100644 --- a/certs/manager_test.go +++ b/certs/manager_test.go @@ -196,7 +196,7 @@ func TestStorageIndex(t *testing.T) { storage, ok := m.storage.(*storage.DummyStorage) if !ok { - t.Error("cannot make storage.DummyStorage of type storage.Handler") + t.Error("cannot make storage.DummyStorage of type interfaces.Handler") } if len(storage.IndexList) != 0 { diff --git a/ctx/ctx.go b/ctx/ctx.go index 13b7fd76aeb..5b4976952c8 100644 --- a/ctx/ctx.go +++ b/ctx/ctx.go @@ -6,12 +6,12 @@ import ( "net/http" "github.com/TykTechnologies/tyk/apidef/oas" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/apidef" logger "github.com/TykTechnologies/tyk/log" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" ) @@ -69,7 +69,7 @@ func ctxSetSession(r *http.Request, s *user.SessionState, scheduleUpdate bool, h } if 
s.KeyHashEmpty() { - s.SetKeyHash(storage.HashKey(s.KeyID, hashKey)) + s.SetKeyHash(util.HashKey(s.KeyID, hashKey)) } ctx := r.Context() diff --git a/gateway/analytics.go b/gateway/analytics.go index 123ce386c9a..c238420210b 100644 --- a/gateway/analytics.go +++ b/gateway/analytics.go @@ -16,6 +16,7 @@ import ( "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/regexp" "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" ) const analyticsKeyName = "tyk-system-analytics" @@ -175,7 +176,7 @@ func (r *RedisAnalyticsHandler) recordWorker() { // we have new record - prepare it and add to buffer // If we are obfuscating API Keys, store the hashed representation (config check handled in hashing function) - record.APIKey = storage.HashKey(record.APIKey, r.globalConf.HashKeys) + record.APIKey = util.HashKey(record.APIKey, r.globalConf.HashKeys) if r.globalConf.SlaveOptions.UseRPC { // Extend tag list to include this data so wecan segment by node if necessary diff --git a/gateway/api.go b/gateway/api.go index 6bb26f24ed8..f038f1d087d 100644 --- a/gateway/api.go +++ b/gateway/api.go @@ -48,6 +48,8 @@ import ( "github.com/getkin/kin-openapi/openapi3" "github.com/TykTechnologies/tyk/config" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/internal/otel" "github.com/TykTechnologies/tyk/internal/uuid" @@ -64,7 +66,6 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/ctx" "github.com/TykTechnologies/tyk/header" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" gql "github.com/TykTechnologies/graphql-go-tools/pkg/graphql" @@ -415,7 +416,7 @@ func (gw *Gateway) setBasicAuthSessionPassword(session *user.SessionState) { return } - session.BasicAuthData.Password = storage.HashStr(session.BasicAuthData.Password, basicAuthHashAlgo) + session.BasicAuthData.Password = 
util.HashStr(session.BasicAuthData.Password, basicAuthHashAlgo) session.BasicAuthData.Hash = user.HashType(basicAuthHashAlgo) } @@ -538,7 +539,7 @@ func (gw *Gateway) handleAddOrUpdate(keyName string, r *http.Request, isHashed b newSession.BasicAuthData.Password = originalKey.BasicAuthData.Password } - if r.Method == http.MethodPost || storage.TokenOrg(keyName) != "" { + if r.Method == http.MethodPost || util.TokenOrg(keyName) != "" { // use new key format if key gets created or updating key with new format if err := gw.doAddOrUpdate(keyName, newSession, suppressReset, isHashed); err != nil { return apiError("Failed to create key, ensure security settings are correct."), http.StatusInternalServerError @@ -586,7 +587,7 @@ func (gw *Gateway) handleAddOrUpdate(keyName string, r *http.Request, isHashed b if isHashed { response.KeyHash = keyName } else { - response.KeyHash = storage.HashKey(keyName, gw.GetConfig().HashKeys) + response.KeyHash = util.HashKey(keyName, gw.GetConfig().HashKeys) } } @@ -614,7 +615,7 @@ func (gw *Gateway) handleGetDetail(sessionKey, apiID, orgID string, byHash bool) mw.ApplyPolicies(&session) if session.QuotaMax != -1 { - quotaKey := QuotaKeyPrefix + storage.HashKey(sessionKey, gw.GetConfig().HashKeys) + quotaKey := QuotaKeyPrefix + util.HashKey(sessionKey, gw.GetConfig().HashKeys) if byHash { quotaKey = QuotaKeyPrefix + sessionKey } @@ -649,7 +650,7 @@ func (gw *Gateway) handleGetDetail(sessionKey, apiID, orgID string, byHash bool) quotaScope = access.AllowanceScope + "-" } - limQuotaKey := QuotaKeyPrefix + quotaScope + storage.HashKey(sessionKey, gw.GetConfig().HashKeys) + limQuotaKey := QuotaKeyPrefix + quotaScope + util.HashKey(sessionKey, gw.GetConfig().HashKeys) if byHash { limQuotaKey = QuotaKeyPrefix + quotaScope + sessionKey } @@ -679,7 +680,7 @@ func (gw *Gateway) handleGetDetail(sessionKey, apiID, orgID string, byHash bool) // If it's a basic auth key and a valid Base64 string, use it as the key ID: if session.IsBasicAuth() { - if 
storage.TokenOrg(sessionKey) != "" { + if util.TokenOrg(sessionKey) != "" { session.KeyID = sessionKey } session.BasicAuthData.Password = "" @@ -1794,7 +1795,7 @@ func (gw *Gateway) handleOrgAddOrUpdate(orgID string, r *http.Request) (interfac if r.URL.Query().Get("reset_quota") == "1" { sessionManager.ResetQuota(orgID, newSession, false) newSession.QuotaRenews = time.Now().Unix() + newSession.QuotaRenewalRate - rawKey := QuotaKeyPrefix + storage.HashKey(orgID, gw.GetConfig().HashKeys) + rawKey := QuotaKeyPrefix + util.HashKey(orgID, gw.GetConfig().HashKeys) // manage quotas separately gw.DefaultQuotaStore.RemoveSession(orgID, rawKey, false) @@ -2062,7 +2063,7 @@ func (gw *Gateway) createKeyHandler(w http.ResponseWriter, r *http.Request) { // add key hash to reply if gw.GetConfig().HashKeys { - obj.KeyHash = storage.HashKey(newKey, gw.GetConfig().HashKeys) + obj.KeyHash = util.HashKey(newKey, gw.GetConfig().HashKeys) } gw.FireSystemEvent(EventTokenCreated, EventTokenMeta{ @@ -2241,7 +2242,7 @@ func (gw *Gateway) createOauthClient(w http.ResponseWriter, r *http.Request) { &RedisOsinStorageInterface{ storageManager, gw.GlobalSessionManager, - &storage.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, + &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, apiSpec.OrgID, gw, }), @@ -2627,7 +2628,7 @@ func (gw *Gateway) getOauthClientDetails(keyName, apiID string) (interface{}, in &RedisOsinStorageInterface{ storageManager, gw.GlobalSessionManager, - &storage.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, + &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, apiSpec.OrgID, gw, }), diff --git a/gateway/api_definition.go b/gateway/api_definition.go index f9cbcbcc58b..0006743c1f1 100644 --- a/gateway/api_definition.go +++ b/gateway/api_definition.go @@ -19,6 
+19,7 @@ import ( texttemplate "text/template" "time" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/storage/kv" "github.com/getkin/kin-openapi/routers" @@ -49,7 +50,6 @@ import ( "github.com/TykTechnologies/tyk/header" "github.com/TykTechnologies/tyk/regexp" "github.com/TykTechnologies/tyk/rpc" - "github.com/TykTechnologies/tyk/storage" ) // const used by cache middleware @@ -1421,7 +1421,7 @@ func (a APIDefinitionLoader) getExtendedPathSpecs(apiVersionDef apidef.VersionIn return combinedPath, len(whiteListPaths) > 0 } -func (a *APISpec) Init(authStore, sessionStore, healthStore, orgStore storage.Handler) { +func (a *APISpec) Init(authStore, sessionStore, healthStore, orgStore interfaces.Handler) { a.AuthManager.Init(authStore) a.Health.Init(healthStore) a.OrgSessionManager.Init(orgStore) diff --git a/gateway/api_healthcheck.go b/gateway/api_healthcheck.go index e8a8d5ce851..ec1ad893925 100644 --- a/gateway/api_healthcheck.go +++ b/gateway/api_healthcheck.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/interfaces" ) type HealthPrefix string @@ -19,7 +19,7 @@ const ( ) type HealthChecker interface { - Init(storage.Handler) + Init(interfaces.Handler) ApiHealthValues() (HealthCheckValues, error) StoreCounterVal(HealthPrefix, string) } @@ -34,11 +34,11 @@ type HealthCheckValues struct { type DefaultHealthChecker struct { Gw *Gateway `json:"-"` - storage storage.Handler + storage interfaces.Handler APIID string } -func (h *DefaultHealthChecker) Init(storeType storage.Handler) { +func (h *DefaultHealthChecker) Init(storeType interfaces.Handler) { if !h.Gw.GetConfig().HealthCheck.EnableHealthChecks { return } diff --git a/gateway/api_loader.go b/gateway/api_loader.go index 20f52493e06..098dc04de17 100644 --- a/gateway/api_loader.go +++ b/gateway/api_loader.go @@ -15,7 +15,9 @@ import ( "sync" texttemplate "text/template" + "github.com/TykTechnologies/tyk/interfaces" 
"github.com/TykTechnologies/tyk/rpc" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/gorilla/mux" "github.com/justinas/alice" @@ -25,7 +27,6 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/coprocess" "github.com/TykTechnologies/tyk/internal/otel" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/trace" ) @@ -42,9 +43,9 @@ type ChainObject struct { func (gw *Gateway) prepareStorage() generalStores { var gs generalStores - gs.redisStore = &storage.RedisCluster{KeyPrefix: "apikey-", HashKeys: gw.GetConfig().HashKeys, ConnectionHandler: gw.StorageConnectionHandler} - gs.redisOrgStore = &storage.RedisCluster{KeyPrefix: "orgkey.", ConnectionHandler: gw.StorageConnectionHandler} - gs.healthStore = &storage.RedisCluster{KeyPrefix: "apihealth.", ConnectionHandler: gw.StorageConnectionHandler} + gs.redisStore = &redisCluster.RedisCluster{KeyPrefix: "apikey-", HashKeys: gw.GetConfig().HashKeys, ConnectionHandler: gw.StorageConnectionHandler} + gs.redisOrgStore = &redisCluster.RedisCluster{KeyPrefix: "orgkey.", ConnectionHandler: gw.StorageConnectionHandler} + gs.healthStore = &redisCluster.RedisCluster{KeyPrefix: "apihealth.", ConnectionHandler: gw.StorageConnectionHandler} gs.rpcAuthStore = &RPCStorageHandler{KeyPrefix: "apikey-", HashKeys: gw.GetConfig().HashKeys, Gw: gw} gs.rpcOrgStore = gw.getGlobalMDCBStorageHandler("orgkey.", false) @@ -287,7 +288,7 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int, } keyPrefix := "cache-" + spec.APIID - cacheStore := storage.RedisCluster{KeyPrefix: keyPrefix, IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} + cacheStore := redisCluster.RedisCluster{KeyPrefix: keyPrefix, IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} cacheStore.Connect() var chain http.Handler @@ -520,7 +521,7 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int, return &chainDef } -func (gw 
*Gateway) configureAuthAndOrgStores(gs *generalStores, spec *APISpec) (storage.Handler, storage.Handler, storage.Handler) { +func (gw *Gateway) configureAuthAndOrgStores(gs *generalStores, spec *APISpec) (interfaces.Handler, interfaces.Handler, interfaces.Handler) { authStore := gs.redisStore orgStore := gs.redisOrgStore @@ -841,7 +842,7 @@ func (gw *Gateway) loadTCPService(spec *APISpec, gs *generalStores, muxer *proxy } type generalStores struct { - redisStore, redisOrgStore, healthStore, rpcAuthStore, rpcOrgStore storage.Handler + redisStore, redisOrgStore, healthStore, rpcAuthStore, rpcOrgStore interfaces.Handler } var playgroundTemplate *texttemplate.Template diff --git a/gateway/api_test.go b/gateway/api_test.go index ab450a0df3d..a9a8b6680ec 100644 --- a/gateway/api_test.go +++ b/gateway/api_test.go @@ -30,8 +30,10 @@ import ( "github.com/TykTechnologies/tyk/apidef/oas" "github.com/TykTechnologies/tyk/certs" "github.com/TykTechnologies/tyk/config" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/uuid" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -669,7 +671,7 @@ func TestKeyHandler_DeleteKeyWithQuota(t *testing.T) { // we might remove the key, but for rpc sometimes we just remove the key and not the quota // so we can get the updated key and still preserving the quota count - _, err := ts.Gw.DefaultQuotaStore.Store().GetRawKey("quota-" + storage.HashKey(key, tc.hashKeys)) + _, err := ts.Gw.DefaultQuotaStore.Store().GetRawKey("quota-" + util.HashKey(key, tc.hashKeys)) found := err == nil assert.Equal(t, quotaTc.quotaFound, found) }) @@ -868,10 +870,10 @@ func TestHashKeyHandler(t *testing.T) { desc string }{ {"", 8, " Legacy tokens, fallback to murmur32"}, - {storage.HashMurmur32, 8, ""}, - {storage.HashMurmur64, 16, ""}, - 
{storage.HashMurmur128, 32, ""}, - {storage.HashSha256, 64, ""}, + {util.HashMurmur32, 8, ""}, + {util.HashMurmur64, 16, ""}, + {util.HashMurmur128, 32, ""}, + {util.HashSha256, 64, ""}, {"wrong", 16, " Should fallback to murmur64 if wrong alg"}, } @@ -895,7 +897,7 @@ func TestDisableKeyActionsByUserName(t *testing.T) { conf := func(globalConf *config.Config) { globalConf.HashKeys = true globalConf.EnableHashedKeysListing = true - globalConf.HashKeyFunction = storage.HashMurmur64 + globalConf.HashKeyFunction = util.HashMurmur64 globalConf.DisableKeyActionsByUsername = true } @@ -1000,7 +1002,7 @@ func TestHashKeyHandlerLegacyWithHashFunc(t *testing.T) { }...) // set custom hashing function and check if we still can get BA session with legacy key format - globalConf.HashKeyFunction = storage.HashMurmur64 + globalConf.HashKeyFunction = util.HashMurmur64 ts.Gw.SetConfig(globalConf) _, _ = ts.Run(t, []test.TestCase{ @@ -1031,7 +1033,7 @@ func (ts *Test) testHashKeyHandlerHelper(t *testing.T, expectedHashSize int) { withAccessJSON := test.MarshalJSON(t)(withAccess) myKey := "my_key_id" - myKeyHash := storage.HashKey(ts.Gw.generateToken("default", myKey), ts.Gw.GetConfig().HashKeys) + myKeyHash := util.HashKey(ts.Gw.generateToken("default", myKey), ts.Gw.GetConfig().HashKeys) if len(myKeyHash) != expectedHashSize { t.Errorf("Expected hash size: %d, got %d. Hash: %s. 
Key: %s", expectedHashSize, len(myKeyHash), myKeyHash, myKey) @@ -1205,7 +1207,7 @@ func TestHashKeyListingDisabled(t *testing.T) { withAccessJSON := test.MarshalJSON(t)(withAccess) myKey := "my_key_id" - myKeyHash := storage.HashKey(ts.Gw.generateToken("default", myKey), ts.Gw.GetConfig().HashKeys) + myKeyHash := util.HashKey(ts.Gw.generateToken("default", myKey), ts.Gw.GetConfig().HashKeys) t.Run("Create, get and delete key with key hashing", func(t *testing.T) { _, _ = ts.Run(t, []test.TestCase{ @@ -1324,7 +1326,7 @@ func TestKeyHandler_HashingDisabled(t *testing.T) { myKeyID := "my_key_id" token := ts.Gw.generateToken("default", myKeyID) - myKeyHash := storage.HashKey(token, ts.Gw.GetConfig().HashKeys) + myKeyHash := util.HashKey(token, ts.Gw.GetConfig().HashKeys) t.Run("Create, get and delete key with key hashing", func(t *testing.T) { _, _ = ts.Run(t, []test.TestCase{ @@ -1721,7 +1723,7 @@ func TestGroupResetHandler(t *testing.T) { didSubscribe := make(chan bool, 1) didReload := make(chan bool, tryReloadCount) - cacheStore := storage.RedisCluster{ConnectionHandler: ts.Gw.StorageConnectionHandler} + cacheStore := redisCluster.RedisCluster{ConnectionHandler: ts.Gw.StorageConnectionHandler} cacheStore.Connect() // Test usually takes 0.05sec or so, timeout after 1s @@ -3874,7 +3876,7 @@ func TestPurgeOAuthClientTokensEndpoint(t *testing.T) { }...) 
}) - assertTokensLen := func(t *testing.T, storageManager storage.Handler, storageKey string, expectedTokensLen int) { + assertTokensLen := func(t *testing.T, storageManager interfaces.Handler, storageKey string, expectedTokensLen int) { t.Helper() nowTs := time.Now().Unix() startScore := strconv.FormatInt(nowTs, 10) diff --git a/gateway/auth_manager.go b/gateway/auth_manager.go index 45967444f8b..30d8da1f7a8 100644 --- a/gateway/auth_manager.go +++ b/gateway/auth_manager.go @@ -8,17 +8,18 @@ import ( "github.com/sirupsen/logrus" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/uuid" + "github.com/TykTechnologies/tyk/storage/util" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" ) // SessionHandler handles all update/create/access session functions and deals exclusively with // user.SessionState objects, not identity type SessionHandler interface { - Init(store storage.Handler) - Store() storage.Handler + Init(store interfaces.Handler) + Store() interfaces.Handler UpdateSession(keyName string, session *user.SessionState, resetTTLTo int64, hashed bool) error RemoveSession(orgID string, keyName string, hashed bool) bool SessionDetail(orgID string, keyName string, hashed bool) (user.SessionState, bool) @@ -29,7 +30,7 @@ type SessionHandler interface { } type DefaultSessionManager struct { - store storage.Handler + store interfaces.Handler orgID string Gw *Gateway `json:"-"` } @@ -41,7 +42,7 @@ func (b *DefaultSessionManager) ResetQuotaObfuscateKey(keyName string) string { return keyName } -func (b *DefaultSessionManager) Init(store storage.Handler) { +func (b *DefaultSessionManager) Init(store interfaces.Handler) { b.store = store b.store.Connect() } @@ -54,7 +55,7 @@ func (b *DefaultSessionManager) KeyExpired(newSession *user.SessionState) bool { return false } -func (b *DefaultSessionManager) Store() storage.Handler { +func (b *DefaultSessionManager) Store() interfaces.Handler { return b.store } 
@@ -62,7 +63,7 @@ func (b *DefaultSessionManager) ResetQuota(keyName string, session *user.Session origKeyName := keyName if !isHashed { - keyName = storage.HashKey(keyName, b.Gw.GetConfig().HashKeys) + keyName = util.HashKey(keyName, b.Gw.GetConfig().HashKeys) } rawKey := QuotaKeyPrefix + keyName @@ -82,7 +83,7 @@ func (b *DefaultSessionManager) ResetQuota(keyName string, session *user.Session b.deleteRawKeysWithAllowanceScope(b.store, session, keyName) } -func (b *DefaultSessionManager) deleteRawKeysWithAllowanceScope(store storage.Handler, session *user.SessionState, keyName string) { +func (b *DefaultSessionManager) deleteRawKeysWithAllowanceScope(store interfaces.Handler, session *user.SessionState, keyName string) { if store == nil || session == nil { return } @@ -99,7 +100,7 @@ func (b *DefaultSessionManager) deleteRawKeysWithAllowanceScope(store storage.Ha func (b *DefaultSessionManager) clearCacheForKey(keyName string, hashed bool) { cacheKey := keyName if !hashed { - cacheKey = storage.HashKey(keyName, b.Gw.GetConfig().HashKeys) + cacheKey = util.HashKey(keyName, b.Gw.GetConfig().HashKeys) } // Delete gateway's cache immediately b.Gw.SessionCache.Delete(cacheKey) @@ -159,7 +160,7 @@ func (b *DefaultSessionManager) SessionDetail(orgID string, keyName string, hash if hashed { jsonKeyVal, err = b.store.GetRawKey(b.store.GetKeyPrefix() + keyName) } else { - if storage.TokenOrg(keyName) != orgID { + if util.TokenOrg(keyName) != orgID { // try to get legacy and new format key at once toSearchList := []string{} if !b.Gw.GetConfig().DisableKeyActionsByUsername { @@ -226,7 +227,7 @@ func (gw *Gateway) generateToken(orgID, keyID string, customHashKeyFunction ...s hashKeyFunction = customHashKeyFunction[0] } - token, err := storage.GenerateToken(orgID, keyID, hashKeyFunction) + token, err := util.GenerateToken(orgID, keyID, hashKeyFunction) if err != nil { log.WithFields(logrus.Fields{ "prefix": "auth-mgr", diff --git a/gateway/auth_manager_test.go 
b/gateway/auth_manager_test.go index 64cdc4c46aa..ef3fd6955cb 100644 --- a/gateway/auth_manager_test.go +++ b/gateway/auth_manager_test.go @@ -13,11 +13,11 @@ import ( "github.com/TykTechnologies/tyk/certs" "github.com/TykTechnologies/tyk/config" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/header" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" @@ -86,7 +86,7 @@ func TestAuthenticationAfterUpdateKey(t *testing.T) { APIID: api.APIID, }} - err := ts.Gw.GlobalSessionManager.UpdateSession(storage.HashKey(key, ts.Gw.GetConfig().HashKeys), session, 0, ts.Gw.GetConfig().HashKeys) + err := ts.Gw.GlobalSessionManager.UpdateSession(util.HashKey(key, ts.Gw.GetConfig().HashKeys), session, 0, ts.Gw.GetConfig().HashKeys) if err != nil { t.Error("could not update session in Session Manager. " + err.Error()) } @@ -103,7 +103,7 @@ func TestAuthenticationAfterUpdateKey(t *testing.T) { APIID: "dummy", }} - err = ts.Gw.GlobalSessionManager.UpdateSession(storage.HashKey(key, ts.Gw.GetConfig().HashKeys), session, 0, ts.Gw.GetConfig().HashKeys) + err = ts.Gw.GlobalSessionManager.UpdateSession(util.HashKey(key, ts.Gw.GetConfig().HashKeys), session, 0, ts.Gw.GetConfig().HashKeys) if err != nil { t.Error("could not update session in Session Manager. 
" + err.Error()) } @@ -386,7 +386,7 @@ func TestCustomKeysEdgeGw(t *testing.T) { func TestDeleteRawKeysWithAllowanceScope(t *testing.T) { sessionManager := DefaultSessionManager{} - t.Run("should not panic if storage.Handler is nil", func(t *testing.T) { + t.Run("should not panic if interfaces.Handler is nil", func(t *testing.T) { session := &user.SessionState{ AccessRights: map[string]user.AccessDefinition{ "ar1": {AllowanceScope: "scope1"}, diff --git a/gateway/cert_test.go b/gateway/cert_test.go index e133cdb6911..06ba51ab219 100644 --- a/gateway/cert_test.go +++ b/gateway/cert_test.go @@ -23,11 +23,11 @@ import ( "go.uber.org/mock/gomock" "github.com/TykTechnologies/tyk/certs/mock" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/internal/crypto" "github.com/TykTechnologies/tyk/header" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" @@ -1310,7 +1310,7 @@ func TestKeyWithCertificateTLS(t *testing.T) { _, _ = ts.Run(t, test.TestCase{Path: "/", Code: 200, Client: client}) session.Certificate = "fooBar" // update redis directly since we have protection not to allow create of a session with wrong certificate - err = ts.Gw.GlobalSessionManager.UpdateSession(storage.HashKey(clientCertID, ts.Gw.GetConfig().HashKeys), session, 0, ts.Gw.GetConfig().HashKeys) + err = ts.Gw.GlobalSessionManager.UpdateSession(util.HashKey(clientCertID, ts.Gw.GetConfig().HashKeys), session, 0, ts.Gw.GetConfig().HashKeys) if err != nil { t.Error("could not update session in Session Manager. 
" + err.Error()) } diff --git a/gateway/coprocess_api.go b/gateway/coprocess_api.go index e2227f8ac86..fb2ef32bddc 100644 --- a/gateway/coprocess_api.go +++ b/gateway/coprocess_api.go @@ -10,19 +10,19 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) // CoProcessDefaultKeyPrefix is used as a key prefix for this CP. const CoProcessDefaultKeyPrefix = "coprocess-data:" -func getStorageForPython(ctx context.Context) storage.RedisCluster { - rc := storage.NewConnectionHandler(ctx) +func getStorageForPython(ctx context.Context) redisCluster.RedisCluster { + rc := redisCluster.NewConnectionHandler(ctx) go rc.Connect(ctx, nil, &config.Config{}) rc.WaitConnect(ctx) - return storage.RedisCluster{KeyPrefix: CoProcessDefaultKeyPrefix, ConnectionHandler: rc} + return redisCluster.RedisCluster{KeyPrefix: CoProcessDefaultKeyPrefix, ConnectionHandler: rc} } // TykStoreData is a CoProcess API function for storing data. 
diff --git a/gateway/coprocess_id_extractor_test.go b/gateway/coprocess_id_extractor_test.go index 49de2e50632..84b77eff68c 100644 --- a/gateway/coprocess_id_extractor_test.go +++ b/gateway/coprocess_id_extractor_test.go @@ -11,7 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage/util" ) const ( @@ -33,9 +34,9 @@ func (ts *Test) createSpecTestFrom(tb testing.TB, def *apidef.APIDefinition) *AP loader := APIDefinitionLoader{Gw: ts.Gw} spec, _ := loader.MakeSpec(&nestedApiDefinition{APIDefinition: def}, nil) tname := tb.Name() - redisStore := &storage.RedisCluster{KeyPrefix: tname + "-apikey.", ConnectionHandler: ts.Gw.StorageConnectionHandler} - healthStore := &storage.RedisCluster{KeyPrefix: tname + "-apihealth.", ConnectionHandler: ts.Gw.StorageConnectionHandler} - orgStore := &storage.RedisCluster{KeyPrefix: tname + "-orgKey.", ConnectionHandler: ts.Gw.StorageConnectionHandler} + redisStore := &redisCluster.RedisCluster{KeyPrefix: tname + "-apikey.", ConnectionHandler: ts.Gw.StorageConnectionHandler} + healthStore := &redisCluster.RedisCluster{KeyPrefix: tname + "-apihealth.", ConnectionHandler: ts.Gw.StorageConnectionHandler} + orgStore := &redisCluster.RedisCluster{KeyPrefix: tname + "-orgKey.", ConnectionHandler: ts.Gw.StorageConnectionHandler} spec.Init(redisStore, redisStore, healthStore, orgStore) return spec } @@ -126,7 +127,7 @@ func TestValueExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -153,7 +154,7 @@ func TestValueExtractor(t *testing.T) { if sessionID != 
testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -190,7 +191,7 @@ func TestRegexExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -219,7 +220,7 @@ func TestRegexExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -249,7 +250,7 @@ func TestRegexExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -285,7 +286,7 @@ func TestXPathExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -313,7 +314,7 @@ func TestXPathExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if 
storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { @@ -342,7 +343,7 @@ func TestXPathExtractor(t *testing.T) { if sessionID != testSessionID { t.Fatalf("session ID doesn't match, expected %s, got %s", testSessionID, sessionID) } - if storage.TokenOrg(sessionID) != spec.OrgID { + if util.TokenOrg(sessionID) != spec.OrgID { t.Fatalf("session ID doesn't contain the org ID, got %s", sessionID) } if overrides.ResponseCode != 0 { diff --git a/gateway/delete_api_cache.go b/gateway/delete_api_cache.go index 5cc138ec13b..0fd7a1a44be 100644 --- a/gateway/delete_api_cache.go +++ b/gateway/delete_api_cache.go @@ -3,10 +3,10 @@ package gateway import ( "fmt" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) func (gw *Gateway) invalidateAPICache(apiID string) bool { - store := storage.RedisCluster{IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} return store.DeleteScanMatch(fmt.Sprintf("cache-%s*", apiID)) } diff --git a/gateway/event_handler_webhooks.go b/gateway/event_handler_webhooks.go index e985a7b3ab7..5d45a6955a4 100644 --- a/gateway/event_handler_webhooks.go +++ b/gateway/event_handler_webhooks.go @@ -18,8 +18,9 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/header" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/event" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) type WebHookRequestMethod string @@ -45,7 +46,7 @@ var ( type WebHookHandler struct { conf apidef.WebHookHandlerConf template *htmltemplate.Template // non-nil if Init is run without error - store storage.Handler 
+ store interfaces.Handler contentType string dashboardService DashboardServiceSender @@ -69,7 +70,7 @@ func (w *WebHookHandler) Init(handlerConf interface{}) error { return ErrEventHandlerDisabled } - w.store = &storage.RedisCluster{KeyPrefix: "webhook.cache.", ConnectionHandler: w.Gw.StorageConnectionHandler} + w.store = &redisCluster.RedisCluster{KeyPrefix: "webhook.cache.", ConnectionHandler: w.Gw.StorageConnectionHandler} w.store.Connect() // Pre-load template on init diff --git a/gateway/gateway_test.go b/gateway/gateway_test.go index 8354e98d233..db3f278b78b 100644 --- a/gateway/gateway_test.go +++ b/gateway/gateway_test.go @@ -26,7 +26,7 @@ import ( "github.com/TykTechnologies/tyk-pump/analytics" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -939,7 +939,7 @@ func TestGatewayHealthCheck(t *testing.T) { func TestCacheAllSafeRequests(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") ts.Gw.BuildAndLoadAPI(func(spec *APISpec) { @@ -965,7 +965,7 @@ func TestCacheAllSafeRequests(t *testing.T) { func TestCacheAllSafeRequestsWithCachedHeaders(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") authorization := "authorization" tenant := "tenant-id" @@ -1006,7 +1006,7 @@ func TestCacheAllSafeRequestsWithCachedHeaders(t *testing.T) { func 
TestCacheWithAdvanceUrlRewrite(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") ts.Gw.BuildAndLoadAPI(func(spec *APISpec) { @@ -1061,7 +1061,7 @@ func TestCacheWithAdvanceUrlRewrite(t *testing.T) { func TestCachePostRequest(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") tenant := "tenant-id" @@ -1103,7 +1103,7 @@ func TestCachePostRequest(t *testing.T) { func TestAdvanceCachePutRequest(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") tenant := "tenant-id" @@ -1191,7 +1191,7 @@ func TestAdvanceCachePutRequest(t *testing.T) { func TestCacheAllSafeRequestsWithAdvancedCacheEndpoint(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") ts.Gw.BuildAndLoadAPI(func(spec *APISpec) { @@ -1226,7 +1226,7 @@ func TestCacheAllSafeRequestsWithAdvancedCacheEndpoint(t *testing.T) { func TestCacheEtag(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := 
redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1276,7 +1276,7 @@ func TestOldCachePlugin(t *testing.T) { check := func(t *testing.T) { t.Helper() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") ts.Gw.LoadAPI(api) @@ -1301,7 +1301,7 @@ func TestOldCachePlugin(t *testing.T) { func TestAdvanceCacheTimeoutPerEndpoint(t *testing.T) { ts := StartTest(nil) defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} + cache := redisCluster.RedisCluster{KeyPrefix: "cache-", ConnectionHandler: ts.Gw.StorageConnectionHandler} defer cache.DeleteScanMatch("*") extendedPaths := apidef.ExtendedPathsSet{ diff --git a/gateway/health_check.go b/gateway/health_check.go index 086071ecd8b..e0899fdd5be 100644 --- a/gateway/health_check.go +++ b/gateway/health_check.go @@ -9,12 +9,12 @@ import ( "time" "github.com/TykTechnologies/tyk/rpc" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/sirupsen/logrus" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/header" - "github.com/TykTechnologies/tyk/storage" ) func (gw *Gateway) setCurrentHealthCheckInfo(h map[string]apidef.HealthCheckItem) { @@ -65,7 +65,7 @@ type SafeHealthCheck struct { func (gw *Gateway) gatherHealthChecks() { allInfos := SafeHealthCheck{info: make(map[string]apidef.HealthCheckItem, 3)} - redisStore := storage.RedisCluster{KeyPrefix: "livenesscheck-", ConnectionHandler: gw.StorageConnectionHandler} + redisStore := redisCluster.RedisCluster{KeyPrefix: "livenesscheck-", ConnectionHandler: gw.StorageConnectionHandler} key := 
"tyk-liveness-probe" diff --git a/gateway/host_checker_manager.go b/gateway/host_checker_manager.go index 76ac2c9b201..128207eab62 100644 --- a/gateway/host_checker_manager.go +++ b/gateway/host_checker_manager.go @@ -13,16 +13,16 @@ import ( "github.com/sirupsen/logrus" msgpack "gopkg.in/vmihailenco/msgpack.v2" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/uuid" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" ) type HostCheckerManager struct { Gw *Gateway `json:"-"` Id string - store storage.Handler + store interfaces.Handler checkerMu sync.Mutex checker *HostUptimeChecker stopLoop bool @@ -72,7 +72,7 @@ const ( UptimeAnalytics_KEYNAME = "tyk-uptime-analytics" ) -func (hc *HostCheckerManager) Init(store storage.Handler) { +func (hc *HostCheckerManager) Init(store interfaces.Handler) { hc.store = store hc.unhealthyHostList = new(sync.Map) hc.resetsInitiated = make(map[string]bool) @@ -534,7 +534,7 @@ func (hc *HostCheckerManager) RecordUptimeAnalytics(report HostHealthReport) err return nil } -func (gw *Gateway) InitHostCheckManager(ctx context.Context, store storage.Handler) { +func (gw *Gateway) InitHostCheckManager(ctx context.Context, store interfaces.Handler) { // Already initialized if gw.GlobalHostChecker.Id != "" { return diff --git a/gateway/host_checker_manager_test.go b/gateway/host_checker_manager_test.go index d50e6dbe266..04565cbdf50 100644 --- a/gateway/host_checker_manager_test.go +++ b/gateway/host_checker_manager_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/test" ) @@ -14,7 +14,7 @@ func TestHostCheckerManagerInit(t *testing.T) { defer ts.Close() hc := HostCheckerManager{Gw: ts.Gw} - redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker-test:", ConnectionHandler: 
ts.Gw.StorageConnectionHandler} + redisStorage := &redisCluster.RedisCluster{KeyPrefix: "host-checker-test:", ConnectionHandler: ts.Gw.StorageConnectionHandler} hc.Init(redisStorage) if hc.Id == "" { @@ -45,7 +45,7 @@ func TestAmIPolling(t *testing.T) { globalConf.UptimeTests.PollerGroup = groupID ts.Gw.SetConfig(globalConf) - redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker-test:", ConnectionHandler: ts.Gw.StorageConnectionHandler} + redisStorage := &redisCluster.RedisCluster{KeyPrefix: "host-checker-test:", ConnectionHandler: ts.Gw.StorageConnectionHandler} hc.Init(redisStorage) hc2 := HostCheckerManager{Gw: ts.Gw} hc2.Init(redisStorage) @@ -74,7 +74,7 @@ func TestAmIPolling(t *testing.T) { //Testing if the PollerCacheKey doesn't contains the poller_group by default hc = HostCheckerManager{Gw: ts.Gw} - redisStorage = &storage.RedisCluster{KeyPrefix: "host-checker-test:", ConnectionHandler: ts.Gw.StorageConnectionHandler} + redisStorage = &redisCluster.RedisCluster{KeyPrefix: "host-checker-test:", ConnectionHandler: ts.Gw.StorageConnectionHandler} hc.Init(redisStorage) hc.AmIPolling() @@ -106,7 +106,7 @@ func TestCheckActivePollerLoop(t *testing.T) { defer ts.Close() hc := &HostCheckerManager{Gw: ts.Gw} - redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker-test-1:", ConnectionHandler: ts.Gw.StorageConnectionHandler} + redisStorage := &redisCluster.RedisCluster{KeyPrefix: "host-checker-test-1:", ConnectionHandler: ts.Gw.StorageConnectionHandler} hc.Init(redisStorage) go hc.CheckActivePollerLoop(ts.Gw.ctx) @@ -122,7 +122,7 @@ func TestStartPoller(t *testing.T) { defer ts.Close() hc := HostCheckerManager{Gw: ts.Gw} - redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker-TestStartPoller:", ConnectionHandler: ts.Gw.StorageConnectionHandler} + redisStorage := &redisCluster.RedisCluster{KeyPrefix: "host-checker-TestStartPoller:", ConnectionHandler: ts.Gw.StorageConnectionHandler} hc.Init(redisStorage) hc.StartPoller(ts.Gw.ctx) @@ -137,7 
+137,7 @@ func TestRecordUptimeAnalytics(t *testing.T) { defer ts.Close() hc := &HostCheckerManager{Gw: ts.Gw} - redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker-test-analytics:", ConnectionHandler: ts.Gw.StorageConnectionHandler} + redisStorage := &redisCluster.RedisCluster{KeyPrefix: "host-checker-test-analytics:", ConnectionHandler: ts.Gw.StorageConnectionHandler} hc.Init(redisStorage) spec := &APISpec{} diff --git a/gateway/host_checker_test.go b/gateway/host_checker_test.go index f28ed369b2b..01a62079b3f 100644 --- a/gateway/host_checker_test.go +++ b/gateway/host_checker_test.go @@ -17,10 +17,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/TykTechnologies/tyk/internal/uuid" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/test" ) @@ -169,7 +169,7 @@ func TestHostChecker(t *testing.T) { t.Error("Should set defaults", ts.Gw.GlobalHostChecker.checker.checkTimeout) } - redisStore := ts.Gw.GlobalHostChecker.store.(*storage.RedisCluster) + redisStore := ts.Gw.GlobalHostChecker.store.(*redisCluster.RedisCluster) if ttl, _ := redisStore.GetKeyTTL(PoolerHostSentinelKeyPrefix + testHttpFailure); int(ttl) != ts.Gw.GlobalHostChecker.checker.checkTimeout*ts.Gw.GlobalHostChecker.checker.sampleTriggerLimit { t.Error("HostDown expiration key should be checkTimeout + 1", ttl) } diff --git a/gateway/ldap_auth_handler.go b/gateway/ldap_auth_handler.go index 7be982a3952..c81e860c8d0 100644 --- a/gateway/ldap_auth_handler.go +++ b/gateway/ldap_auth_handler.go @@ -7,7 +7,7 @@ import ( "github.com/mavricknz/ldap" ) -// LDAPStorageHandler implements storage.Handler, this is a read-only implementation to access keys from an LDAP service +// LDAPStorageHandler implements interfaces.Handler, this is a read-only implementation to access keys from an LDAP service type 
LDAPStorageHandler struct { LDAPServer string LDAPPort uint16 diff --git a/gateway/middleware.go b/gateway/middleware.go index 91e84d960bc..ed1804d21af 100644 --- a/gateway/middleware.go +++ b/gateway/middleware.go @@ -17,6 +17,7 @@ import ( "github.com/TykTechnologies/tyk/internal/otel" "github.com/TykTechnologies/tyk/internal/policy" "github.com/TykTechnologies/tyk/rpc" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/header" @@ -29,7 +30,6 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/request" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/trace" "github.com/TykTechnologies/tyk/user" ) @@ -284,7 +284,7 @@ func (t *BaseMiddleware) OrgSession(orgID string) (user.SessionState, bool) { t.Gw.ExpiryCache.Set(session.OrgID, session.DataExpires, cache.DefaultExpiration) } - session.SetKeyHash(storage.HashKey(orgID, t.Gw.GetConfig().HashKeys)) + session.SetKeyHash(util.HashKey(orgID, t.Gw.GetConfig().HashKeys)) return session.Clone(), found } @@ -397,7 +397,7 @@ func (t *BaseMiddleware) CheckSessionAndIdentityForValidKey(originalKey string, keyHash := key cacheKey := key if t.Spec.GlobalConfig.HashKeys { - cacheKey = storage.HashStr(key, storage.HashMurmur64) // always hash cache keys with murmur64 to prevent collisions + cacheKey = util.HashStr(key, util.HashMurmur64) // always hash cache keys with murmur64 to prevent collisions } // Check in-memory cache @@ -420,7 +420,7 @@ func (t *BaseMiddleware) CheckSessionAndIdentityForValidKey(originalKey string, if found { if t.Spec.GlobalConfig.HashKeys { - keyHash = storage.HashStr(session.KeyID) + keyHash = util.HashStr(session.KeyID) } session := session.Clone() session.SetKeyHash(keyHash) diff --git a/gateway/mw_api_rate_limit.go b/gateway/mw_api_rate_limit.go index 6471845ce4b..f17689b1ae5 100644 --- a/gateway/mw_api_rate_limit.go +++ b/gateway/mw_api_rate_limit.go @@ -7,7 +7,7 @@ import ( "time" 
"github.com/TykTechnologies/tyk/internal/event" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/user" ) @@ -54,14 +54,14 @@ func (k *RateLimitForAPI) getSession(r *http.Request) *user.SessionState { if ok { if limits := spec.RateLimit; limits.Valid() { // track per-endpoint with a hash of the path - keyname := k.keyName + "-" + storage.HashStr(limits.Path) + keyname := k.keyName + "-" + util.HashStr(limits.Path) session := &user.SessionState{ Rate: limits.Rate, Per: limits.Per, LastUpdated: k.apiSess.LastUpdated, } - session.SetKeyHash(storage.HashKey(keyname, k.Gw.GetConfig().HashKeys)) + session.SetKeyHash(util.HashKey(keyname, k.Gw.GetConfig().HashKeys)) return session } @@ -84,7 +84,7 @@ func (k *RateLimitForAPI) EnabledForSpec() bool { Per: k.Spec.GlobalRateLimit.Per, LastUpdated: strconv.Itoa(int(time.Now().UnixNano())), } - k.apiSess.SetKeyHash(storage.HashKey(k.keyName, k.Gw.GetConfig().HashKeys)) + k.apiSess.SetKeyHash(util.HashKey(k.keyName, k.Gw.GetConfig().HashKeys)) return true } diff --git a/gateway/mw_auth_key.go b/gateway/mw_auth_key.go index aaf136ef778..f2d1328a420 100644 --- a/gateway/mw_auth_key.go +++ b/gateway/mw_auth_key.go @@ -8,7 +8,7 @@ import ( "github.com/TykTechnologies/tyk/internal/crypto" "github.com/TykTechnologies/tyk/internal/otel" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/user" @@ -172,7 +172,7 @@ func (k *AuthKey) ProcessRequest(_ http.ResponseWriter, r *http.Request, _ inter } // As a second approach, try to use the internal ID that's part of the B64 JSON key: - keyID, err := storage.TokenID(key) + keyID, err := util.TokenID(key) if err == nil { err, statusCode := k.validateSignature(r, keyID) if err == nil { diff --git a/gateway/mw_auth_key_test.go b/gateway/mw_auth_key_test.go index f1b4b5e0776..ed5337e57e6 100644 --- a/gateway/mw_auth_key_test.go +++ 
b/gateway/mw_auth_key_test.go @@ -17,7 +17,7 @@ import ( "github.com/TykTechnologies/tyk/certs" "github.com/TykTechnologies/tyk/config" signaturevalidator "github.com/TykTechnologies/tyk/signature_validator" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" @@ -254,7 +254,7 @@ func TestSignatureValidation(t *testing.T) { } // Second request uses token (org ID + key) and token-based signature: - token, err := storage.GenerateToken("default", customKey, "murmur64") + token, err := util.GenerateToken("default", customKey, "murmur64") if err != nil { t.Fatal(err) } diff --git a/gateway/mw_basic_auth.go b/gateway/mw_basic_auth.go index c7d1275df8a..fc7881c6bff 100644 --- a/gateway/mw_basic_auth.go +++ b/gateway/mw_basic_auth.go @@ -16,7 +16,7 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/header" "github.com/TykTechnologies/tyk/regexp" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/user" "github.com/TykTechnologies/tyk/internal/cache" @@ -189,7 +189,7 @@ func (k *BasicAuthKeyIsValid) ProcessRequest(w http.ResponseWriter, r *http.Requ } else { // check for key with legacy format "org_id" + "user_name" logger.Info("Could not find user, falling back to legacy format key.") legacyKeyName := strings.TrimPrefix(username, k.Spec.OrgID) - keyName, _ = storage.GenerateToken(k.Spec.OrgID, legacyKeyName, "") + keyName, _ = util.GenerateToken(k.Spec.OrgID, legacyKeyName, "") session, keyExists = k.CheckSessionAndIdentityForValidKey(keyName, r) keyName = session.KeyID if !keyExists { @@ -227,7 +227,7 @@ func (k *BasicAuthKeyIsValid) checkPassword(session *user.SessionState, plainPas hashAlgo := string(session.BasicAuthData.Hash) // Checks the storage algo picked - hashedPassword := storage.HashStr(plainPassword, hashAlgo) + hashedPassword := 
util.HashStr(plainPassword, hashAlgo) if session.BasicAuthData.Password != hashedPassword { return errUnauthorized } diff --git a/gateway/mw_basic_auth_test.go b/gateway/mw_basic_auth_test.go index 48b311e461b..0081801dbe7 100644 --- a/gateway/mw_basic_auth_test.go +++ b/gateway/mw_basic_auth_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -122,7 +122,7 @@ func TestBasicAuthLegacyWithHashFunc(t *testing.T) { }...) // set custom hashing function and check if we still do BA session auth with legacy key format - globalConf.HashKeyFunction = storage.HashMurmur64 + globalConf.HashKeyFunction = util.HashMurmur64 ts.Gw.SetConfig(globalConf) ts.Run(t, []test.TestCase{ diff --git a/gateway/mw_external_oauth.go b/gateway/mw_external_oauth.go index 24654134d42..8f7e7e2b2a1 100644 --- a/gateway/mw_external_oauth.go +++ b/gateway/mw_external_oauth.go @@ -15,7 +15,7 @@ import ( "github.com/golang-jwt/jwt/v4" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/user" "github.com/TykTechnologies/tyk/internal/cache" @@ -292,11 +292,11 @@ func isExpired(claims jwt.MapClaims) bool { } func newIntrospectionCache(gw *Gateway) *introspectionCache { - return &introspectionCache{RedisCluster: storage.RedisCluster{KeyPrefix: "introspection-", ConnectionHandler: gw.StorageConnectionHandler}} + return &introspectionCache{RedisCluster: redisCluster.RedisCluster{KeyPrefix: "introspection-", ConnectionHandler: gw.StorageConnectionHandler}} } type introspectionCache struct { - storage.RedisCluster + redisCluster.RedisCluster } func (c *introspectionCache) GetRes(token string) (jwt.MapClaims, bool) { diff --git a/gateway/mw_jwt.go b/gateway/mw_jwt.go index 
64ee86c6dcf..0b7d0228809 100644 --- a/gateway/mw_jwt.go +++ b/gateway/mw_jwt.go @@ -20,7 +20,7 @@ import ( "github.com/lonelycode/osin" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/user" "github.com/TykTechnologies/tyk/internal/cache" @@ -584,7 +584,7 @@ func (k *JWTMiddleware) processCentralisedJWT(r *http.Request, token *jwt.Token) &RedisOsinStorageInterface{ storageManager, k.Gw.GlobalSessionManager, - &storage.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: k.Gw.StorageConnectionHandler}, + &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: k.Gw.StorageConnectionHandler}, k.Spec.OrgID, k.Gw, }), diff --git a/gateway/mw_redis_cache.go b/gateway/mw_redis_cache.go index 3db91eba964..4bce3877ac6 100644 --- a/gateway/mw_redis_cache.go +++ b/gateway/mw_redis_cache.go @@ -18,9 +18,9 @@ import ( "github.com/TykTechnologies/murmur3" "github.com/TykTechnologies/tyk/header" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/regexp" "github.com/TykTechnologies/tyk/request" - "github.com/TykTechnologies/tyk/storage" ) const ( @@ -31,7 +31,7 @@ const ( type RedisCacheMiddleware struct { *BaseMiddleware - store storage.Handler + store interfaces.Handler sh SuccessHandler } diff --git a/gateway/oauth_manager.go b/gateway/oauth_manager.go index 2b0780d2527..a0cb7862e75 100644 --- a/gateway/oauth_manager.go +++ b/gateway/oauth_manager.go @@ -18,14 +18,16 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/crypto/bcrypt" + "github.com/TykTechnologies/tyk/interfaces" internalerrors "github.com/TykTechnologies/tyk/internal/errors" "github.com/TykTechnologies/tyk/internal/uuid" "github.com/TykTechnologies/tyk/request" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage/util" "strconv" 
"github.com/TykTechnologies/tyk/header" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" ) @@ -403,7 +405,7 @@ func (o *OAuthManager) HandleAccess(r *http.Request) *osin.Response { if ar.Type == osin.PASSWORD { username = r.Form.Get("username") password := r.Form.Get("password") - searchKey := "apikey-" + storage.HashKey(o.API.OrgID+username, o.Gw.GetConfig().HashKeys) + searchKey := "apikey-" + util.HashKey(o.API.OrgID+username, o.Gw.GetConfig().HashKeys) log.Debug("Getting: ", searchKey) var err error @@ -589,9 +591,9 @@ func (gw *Gateway) TykOsinNewServer(config *osin.ServerConfig, storage ExtendedO // TODO: Refactor this to move prefix handling into a checker method, then it can be an unexported setting in the struct. // RedisOsinStorageInterface implements osin.Storage interface to use Tyk's own storage mechanism type RedisOsinStorageInterface struct { - store storage.Handler + store interfaces.Handler sessionManager SessionHandler - redisStore storage.Handler + redisStore interfaces.Handler orgID string Gw *Gateway `json:"-"` } @@ -938,7 +940,7 @@ func (r *RedisOsinStorageInterface) SaveAccess(accessData *osin.AccessData) erro if err != nil { return err } - key := prefixAccess + storage.HashKey(accessData.AccessToken, r.Gw.GetConfig().HashKeys) + key := prefixAccess + util.HashKey(accessData.AccessToken, r.Gw.GetConfig().HashKeys) log.Debug("Saving ACCESS key: ", key) // Overide default ExpiresIn: @@ -956,7 +958,7 @@ func (r *RedisOsinStorageInterface) SaveAccess(accessData *osin.AccessData) erro log.Debug("Adding ACCESS key to sorted list: ", sortedListKey) r.redisStore.AddToSortedSet( sortedListKey, - storage.HashKey(accessData.AccessToken, r.Gw.GetConfig().HashKeys), + util.HashKey(accessData.AccessToken, r.Gw.GetConfig().HashKeys), float64(accessData.CreatedAt.Unix()+int64(accessData.ExpiresIn)), // set score as token expire timestamp ) @@ -1035,7 +1037,7 @@ func (r *RedisOsinStorageInterface) SaveAccess(accessData 
*osin.AccessData) erro // LoadAccess will load access data from redis func (r *RedisOsinStorageInterface) LoadAccess(token string) (*osin.AccessData, error) { - key := prefixAccess + storage.HashKey(token, r.Gw.GetConfig().HashKeys) + key := prefixAccess + util.HashKey(token, r.Gw.GetConfig().HashKeys) log.Debug("Loading ACCESS key: ", key) accessJSON, err := r.store.GetKey(key) @@ -1073,7 +1075,7 @@ func (r *RedisOsinStorageInterface) RemoveAccess(token string) error { log.Warning("Cannot load access token:", token) } - key := prefixAccess + storage.HashKey(token, r.Gw.GetConfig().HashKeys) + key := prefixAccess + util.HashKey(token, r.Gw.GetConfig().HashKeys) r.store.DeleteKey(key) // remove the access token from central storage too r.sessionManager.RemoveSession(r.orgID, token, false) @@ -1190,7 +1192,7 @@ func (gw *Gateway) purgeLapsedOAuthTokens() error { return nil } - redisCluster := &storage.RedisCluster{KeyPrefix: "", HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler} + redisCluster := &redisCluster.RedisCluster{KeyPrefix: "", HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler} ok, err := redisCluster.Lock("oauth-purge-lock", time.Minute) if err != nil { diff --git a/gateway/oauth_manager_test.go b/gateway/oauth_manager_test.go index 92d4f965cbd..e42e61f1198 100644 --- a/gateway/oauth_manager_test.go +++ b/gateway/oauth_manager_test.go @@ -18,7 +18,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/redis" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/config" @@ -33,7 +35,6 @@ import ( "github.com/TykTechnologies/tyk/internal/uuid" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -910,7 +911,7 @@ func testGetClientTokens(t *testing.T, hashed bool) { if hashed { 
// save tokens for future check - tokensID[storage.HashKey(response["access_token"].(string), ts.Gw.GetConfig().HashKeys)] = true + tokensID[util.HashKey(response["access_token"].(string), ts.Gw.GetConfig().HashKeys)] = true } else { tokensID[response["access_token"].(string)] = true } @@ -1315,7 +1316,7 @@ func TestJSONToFormValues(t *testing.T) { }) } -func assertTokensLen(t *testing.T, storageManager storage.Handler, storageKey string, expectedTokensLen int) { +func assertTokensLen(t *testing.T, storageManager interfaces.Handler, storageKey string, expectedTokensLen int) { t.Helper() nowTs := time.Now().Unix() startScore := strconv.FormatInt(nowTs, 10) diff --git a/gateway/redis_analytics_purger.go b/gateway/redis_analytics_purger.go index f4ef99c7eeb..379165e13ff 100644 --- a/gateway/redis_analytics_purger.go +++ b/gateway/redis_analytics_purger.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/interfaces" ) // Purger is an interface that will define how the in-memory store will be purged @@ -16,7 +16,7 @@ type Purger interface { } type RedisPurger struct { - Store storage.Handler + Store interfaces.Handler Gw *Gateway `json:"-"` } diff --git a/gateway/redis_logrus_hook.go b/gateway/redis_logrus_hook.go index b5a41c78eab..83d6ccc9acd 100644 --- a/gateway/redis_logrus_hook.go +++ b/gateway/redis_logrus_hook.go @@ -3,9 +3,8 @@ package gateway import ( "time" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/sirupsen/logrus" - - "github.com/TykTechnologies/tyk/storage" ) type redisChannelHook struct { @@ -16,7 +15,7 @@ type redisChannelHook struct { func (gw *Gateway) newRedisHook() *redisChannelHook { hook := &redisChannelHook{} hook.formatter = new(logrus.JSONFormatter) - hook.notifier.store = &storage.RedisCluster{KeyPrefix: "gateway-notifications:", ConnectionHandler: gw.StorageConnectionHandler} + hook.notifier.store = &redisCluster.RedisCluster{KeyPrefix: 
"gateway-notifications:", ConnectionHandler: gw.StorageConnectionHandler} hook.notifier.channel = "dashboard.ui.messages" return hook } diff --git a/gateway/redis_signals.go b/gateway/redis_signals.go index e8e79d6c7cb..df650e1b4bc 100644 --- a/gateway/redis_signals.go +++ b/gateway/redis_signals.go @@ -15,7 +15,7 @@ import ( temporalmodel "github.com/TykTechnologies/storage/temporal/model" "github.com/TykTechnologies/tyk/internal/crypto" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) type NotificationCommand string @@ -59,7 +59,7 @@ func (n *Notification) Sign() { } func (gw *Gateway) startPubSubLoop() { - cacheStore := storage.RedisCluster{ConnectionHandler: gw.StorageConnectionHandler} + cacheStore := redisCluster.RedisCluster{ConnectionHandler: gw.StorageConnectionHandler} cacheStore.Connect() message := "Connection to Redis failed, reconnect in 10s" @@ -228,7 +228,7 @@ func isPayloadSignatureValid(notification Notification) bool { // RedisNotifier will use redis pub/sub channels to send notifications type RedisNotifier struct { - store *storage.RedisCluster + store *redisCluster.RedisCluster channel string *Gateway } @@ -251,7 +251,7 @@ func (r *RedisNotifier) Notify(notif interface{}) bool { // pubSubLog.Debug("Sending notification", notif) if err := r.store.Publish(r.channel, string(toSend)); err != nil { - if !errors.Is(err, storage.ErrRedisIsDown) { + if !errors.Is(err, redisCluster.ErrRedisIsDown) { pubSubLog.Error("Could not send notification: ", err) } return false diff --git a/gateway/res_cache.go b/gateway/res_cache.go index 15245a3f71e..55716337c17 100644 --- a/gateway/res_cache.go +++ b/gateway/res_cache.go @@ -10,7 +10,7 @@ import ( "github.com/sirupsen/logrus" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/user" ) @@ -22,7 +22,7 @@ const ( // ResponseCacheMiddleware is a caching middleware that will pull 
data from Redis instead of the upstream proxy type ResponseCacheMiddleware struct { BaseTykResponseHandler - store storage.Handler + store interfaces.Handler } func (m *ResponseCacheMiddleware) Base() *BaseTykResponseHandler { diff --git a/gateway/rpc_backup_handlers.go b/gateway/rpc_backup_handlers.go index 2a59a92d74d..8b4b70aefe9 100644 --- a/gateway/rpc_backup_handlers.go +++ b/gateway/rpc_backup_handlers.go @@ -12,7 +12,7 @@ import ( "github.com/sirupsen/logrus" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/user" ) @@ -33,7 +33,7 @@ func (gw *Gateway) LoadDefinitionsFromRPCBackup() ([]*APISpec, error) { tagList := getTagListAsString(gw.GetConfig().DBAppConfOptions.Tags) checkKey := BackupApiKeyBase + tagList - store := storage.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} connected := store.Connect() log.Info("[RPC] --> Loading API definitions from backup") @@ -63,7 +63,7 @@ func (gw *Gateway) saveRPCDefinitionsBackup(list string) error { log.Info("--> Connecting to DB") - store := storage.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} connected := store.Connect() log.Info("--> Connected to DB") @@ -86,7 +86,7 @@ func (gw *Gateway) LoadPoliciesFromRPCBackup() (map[string]user.Policy, error) { tagList := getTagListAsString(gw.GetConfig().DBAppConfOptions.Tags) checkKey := BackupPolicyKeyBase + tagList - store := storage.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} connected := store.Connect() log.Info("[RPC] Loading 
Policies from backup") @@ -123,7 +123,7 @@ func (gw *Gateway) saveRPCPoliciesBackup(list string) error { log.Info("--> Connecting to DB") - store := storage.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} connected := store.Connect() log.Info("--> Connected to DB") diff --git a/gateway/rpc_storage_handler.go b/gateway/rpc_storage_handler.go index c53c3177945..5d5cd0563d6 100644 --- a/gateway/rpc_storage_handler.go +++ b/gateway/rpc_storage_handler.go @@ -8,12 +8,14 @@ import ( "time" temporalmodel "github.com/TykTechnologies/storage/temporal/model" + "github.com/TykTechnologies/tyk/internal/cache" "github.com/TykTechnologies/tyk/internal/model" "github.com/TykTechnologies/tyk/rpc" + "github.com/TykTechnologies/tyk/storage/shared" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" "github.com/sirupsen/logrus" ) @@ -220,7 +222,7 @@ func (r *RPCStorageHandler) hashKey(in string) string { // Not hashing? 
Return the raw key return in } - return storage.HashStr(in) + return util.HashStr(in) } func (r *RPCStorageHandler) fixKey(keyName string) string { @@ -270,7 +272,7 @@ func (r *RPCStorageHandler) GetRawKey(keyName string) (string, error) { } if rpc.IsEmergencyMode() { - return "", storage.ErrMDCBConnectionLost + return "", shared.ErrMDCBConnectionLost } value, err := rpc.FuncClientSingleton("GetKey", keyName) @@ -286,9 +288,9 @@ func (r *RPCStorageHandler) GetRawKey(keyName string) (string, error) { } if cacheEnabled { // Errors, and key not found, should be cached for a small amount of time - cacheStore.Set(keyName, storage.ErrKeyNotFound, 1) + cacheStore.Set(keyName, shared.ErrKeyNotFound, 1) } - return "", storage.ErrKeyNotFound + return "", shared.ErrKeyNotFound } if cacheEnabled { @@ -331,7 +333,7 @@ func (r *RPCStorageHandler) GetExp(keyName string) (int64, error) { } } log.Error("Error trying to get TTL: ", err) - return 0, storage.ErrKeyNotFound + return 0, shared.ErrKeyNotFound } return value.(int64), nil } @@ -902,98 +904,6 @@ func (r *RPCStorageHandler) CheckForKeyspaceChanges(orgId string) { } } -func (gw *Gateway) getSessionAndCreate(keyName string, r *RPCStorageHandler, isHashed bool, orgId string) { - - key := keyName - // avoid double hashing - if !isHashed { - key = storage.HashKey(keyName, gw.GetConfig().HashKeys) - } - - sessionString, err := r.GetRawKey("apikey-" + key) - if err != nil { - log.Error("Key not found in master - skipping") - } else { - gw.handleAddKey(key, sessionString, orgId) - } -} - -func (gw *Gateway) ProcessSingleOauthClientEvent(apiId, oauthClientId, orgID, event string) { - store, _, err := gw.GetStorageForApi(apiId) - if err != nil { - log.Error("Could not get oauth storage for api") - return - } - - switch event { - case OauthClientAdded: - // on add: pull from rpc and save it in local redis - client, err := store.GetClient(oauthClientId) - if err != nil { - log.WithError(err).Error("Could not retrieve new oauth client 
information") - return - } - - err = store.SetClient(oauthClientId, orgID, client, false) - if err != nil { - log.WithError(err).Error("Could not save oauth client.") - return - } - - log.Info("oauth client created successfully") - case OauthClientRemoved: - // on remove: remove from local redis - err := store.DeleteClient(oauthClientId, orgID, false) - if err != nil { - log.Errorf("Could not delete oauth client with id: %v", oauthClientId) - return - } - log.Infof("Oauth Client deleted successfully") - case OauthClientUpdated: - // on update: delete from local redis and pull again from rpc - _, err := store.GetClient(oauthClientId) - if err != nil { - log.WithError(err).Error("Could not retrieve oauth client information") - return - } - - err = store.DeleteClient(oauthClientId, orgID, false) - if err != nil { - log.WithError(err).Error("Could not delete oauth client") - return - } - - client, err := store.GetClient(oauthClientId) - if err != nil { - log.WithError(err).Error("Could not retrieve oauth client information") - return - } - - err = store.SetClient(oauthClientId, orgID, client, false) - if err != nil { - log.WithError(err).Error("Could not save oauth client.") - return - } - log.Info("oauth client updated successfully") - default: - log.Warningf("Oauth client event not supported:%v", event) - } -} - -// ProcessOauthClientsOps performs the appropriate action for the received clients -// it can be any of the Create,Update and Delete operations -func (gw *Gateway) ProcessOauthClientsOps(clients map[string]string) { - for clientInfo, action := range clients { - // clientInfo is: APIID.ClientID.OrgID - eventValues := strings.Split(clientInfo, ".") - apiId := eventValues[0] - oauthClientId := eventValues[1] - orgID := eventValues[2] - - gw.ProcessSingleOauthClientEvent(apiId, oauthClientId, orgID, action) - } -} - // ProcessKeySpaceChanges receives an array of keys to be processed, those keys are considered changes in the keyspace in the // management layer, 
they could be: regular keys (hashed, unhashed), revoke oauth client, revoke single oauth token, // certificates (added, removed), oauth client (added, updated, removed) @@ -1112,7 +1022,7 @@ func (r *RPCStorageHandler) ProcessKeySpaceChanges(keys []string, orgId string) } else { log.Info("--> removing cached key: ", r.Gw.obfuscateKey(key)) // in case it's a username (basic auth) or custom-key then generate the token - if storage.TokenOrg(key) == "" { + if util.TokenOrg(key) == "" { key = r.Gw.generateToken(orgId, key) } _, status = r.Gw.handleDeleteKey(key, orgId, "-1", resetQuota) @@ -1153,7 +1063,7 @@ func (r *RPCStorageHandler) ProcessKeySpaceChanges(keys []string, orgId string) // Function to handle fallback deletion using token ID func (r *RPCStorageHandler) deleteUsingTokenID(key, orgId string, resetQuota bool, status int) (int, error) { if status == http.StatusNotFound { - id, err := storage.TokenID(key) + id, err := util.TokenID(key) if err == nil { _, status = r.Gw.handleDeleteKey(id, orgId, "-1", resetQuota) } @@ -1197,3 +1107,95 @@ func (r *RPCStorageHandler) Exists(keyName string) (bool, error) { log.Error("Not implemented") return false, nil } + +func (gw *Gateway) getSessionAndCreate(keyName string, r *RPCStorageHandler, isHashed bool, orgId string) { + + key := keyName + // avoid double hashing + if !isHashed { + key = util.HashKey(keyName, gw.GetConfig().HashKeys) + } + + sessionString, err := r.GetRawKey("apikey-" + key) + if err != nil { + log.Error("Key not found in master - skipping") + } else { + gw.handleAddKey(key, sessionString, orgId) + } +} + +func (gw *Gateway) ProcessSingleOauthClientEvent(apiId, oauthClientId, orgID, event string) { + store, _, err := gw.GetStorageForApi(apiId) + if err != nil { + log.Error("Could not get oauth storage for api") + return + } + + switch event { + case OauthClientAdded: + // on add: pull from rpc and save it in local redis + client, err := store.GetClient(oauthClientId) + if err != nil { + 
log.WithError(err).Error("Could not retrieve new oauth client information") + return + } + + err = store.SetClient(oauthClientId, orgID, client, false) + if err != nil { + log.WithError(err).Error("Could not save oauth client.") + return + } + + log.Info("oauth client created successfully") + case OauthClientRemoved: + // on remove: remove from local redis + err := store.DeleteClient(oauthClientId, orgID, false) + if err != nil { + log.Errorf("Could not delete oauth client with id: %v", oauthClientId) + return + } + log.Infof("Oauth Client deleted successfully") + case OauthClientUpdated: + // on update: delete from local redis and pull again from rpc + _, err := store.GetClient(oauthClientId) + if err != nil { + log.WithError(err).Error("Could not retrieve oauth client information") + return + } + + err = store.DeleteClient(oauthClientId, orgID, false) + if err != nil { + log.WithError(err).Error("Could not delete oauth client") + return + } + + client, err := store.GetClient(oauthClientId) + if err != nil { + log.WithError(err).Error("Could not retrieve oauth client information") + return + } + + err = store.SetClient(oauthClientId, orgID, client, false) + if err != nil { + log.WithError(err).Error("Could not save oauth client.") + return + } + log.Info("oauth client updated successfully") + default: + log.Warningf("Oauth client event not supported:%v", event) + } +} + +// ProcessOauthClientsOps performs the appropriate action for the received clients +// it can be any of the Create,Update and Delete operations +func (gw *Gateway) ProcessOauthClientsOps(clients map[string]string) { + for clientInfo, action := range clients { + // clientInfo is: APIID.ClientID.OrgID + eventValues := strings.Split(clientInfo, ".") + apiId := eventValues[0] + oauthClientId := eventValues[1] + orgID := eventValues[2] + + gw.ProcessSingleOauthClientEvent(apiId, oauthClientId, orgID, action) + } +} diff --git a/gateway/rpc_storage_handler_test.go b/gateway/rpc_storage_handler_test.go 
index 4d7f21737e3..e4193b2485b 100644 --- a/gateway/rpc_storage_handler_test.go +++ b/gateway/rpc_storage_handler_test.go @@ -10,6 +10,8 @@ import ( "github.com/TykTechnologies/tyk/internal/model" "github.com/TykTechnologies/tyk/rpc" + "github.com/TykTechnologies/tyk/storage/shared" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" @@ -18,7 +20,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/TykTechnologies/tyk/header" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -36,7 +37,7 @@ func buildStringEvent(eventType, token, apiId string) string { switch eventType { case RevokeOauthHashedToken: // string is as= {the-hashed-token}#hashed:{api-id}:oAuthRevokeToken - token = storage.HashStr(token) + token = util.HashStr(token) return fmt.Sprintf("%s#hashed:%s:oAuthRevokeToken", token, apiId) case RevokeOauthToken: // string is as= {the-token}:{api-id}:oAuthRevokeToken @@ -621,7 +622,7 @@ func TestGetRawKey(t *testing.T) { } _, err := rpcListener.GetRawKey("any-key") - assert.Equal(t, storage.ErrMDCBConnectionLost, err) + assert.Equal(t, shared.ErrMDCBConnectionLost, err) }) } @@ -677,7 +678,7 @@ func TestDeleteUsingTokenID(t *testing.T) { assert.Equal(t, http.StatusOK, status) // it should not exist anymore _, err = ts.Gw.GlobalSessionManager.Store().GetKey(customKey) - assert.ErrorIs(t, storage.ErrKeyNotFound, err) + assert.ErrorIs(t, shared.ErrKeyNotFound, err) }) t.Run("status not found and TokenID do not exist", func(t *testing.T) { diff --git a/gateway/server.go b/gateway/server.go index 876640e5cf3..e1b16889928 100644 --- a/gateway/server.go +++ b/gateway/server.go @@ -25,6 +25,7 @@ import ( texttemplate "text/template" "time" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/crypto" "github.com/TykTechnologies/tyk/internal/httputil" 
"github.com/TykTechnologies/tyk/internal/otel" @@ -59,8 +60,9 @@ import ( logger "github.com/TykTechnologies/tyk/log" "github.com/TykTechnologies/tyk/regexp" "github.com/TykTechnologies/tyk/rpc" - "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/storage/kv" + "github.com/TykTechnologies/tyk/storage/mdcb" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/trace" "github.com/TykTechnologies/tyk/user" @@ -195,7 +197,7 @@ type Gateway struct { templatesRaw *texttemplate.Template // RedisController keeps track of redis connection and singleton - StorageConnectionHandler *storage.ConnectionHandler + StorageConnectionHandler *redisCluster.ConnectionHandler hostDetails model.HostDetails healthCheckInfo atomic.Value @@ -246,7 +248,7 @@ func NewGateway(config config.Config, ctx context.Context) *Gateway { gw.ReloadTestCase = NewReloadMachinery() gw.TestBundles = map[string]map[string]string{} - gw.StorageConnectionHandler = storage.NewConnectionHandler(ctx) + gw.StorageConnectionHandler = redisCluster.NewConnectionHandler(ctx) gw.SetNodeID("solo-" + uuid.New()) gw.SessionID = uuid.New() @@ -345,16 +347,16 @@ func (gw *Gateway) setupGlobals() { mainLog.Warn("Running Uptime checks in a management node.") } - healthCheckStore := storage.RedisCluster{KeyPrefix: "host-checker:", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} + healthCheckStore := redisCluster.RedisCluster{KeyPrefix: "host-checker:", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} gw.InitHostCheckManager(gw.ctx, &healthCheckStore) } gw.initHealthCheck(gw.ctx) - redisStore := storage.RedisCluster{KeyPrefix: "apikey-", HashKeys: gwConfig.HashKeys, ConnectionHandler: gw.StorageConnectionHandler} + redisStore := redisCluster.RedisCluster{KeyPrefix: "apikey-", HashKeys: gwConfig.HashKeys, ConnectionHandler: gw.StorageConnectionHandler} gw.GlobalSessionManager.Init(&redisStore) - versionStore := 
storage.RedisCluster{KeyPrefix: "version-check-", ConnectionHandler: gw.StorageConnectionHandler} + versionStore := redisCluster.RedisCluster{KeyPrefix: "version-check-", ConnectionHandler: gw.StorageConnectionHandler} versionStore.Connect() err := versionStore.SetKey("gateway", VERSION, 0) @@ -368,11 +370,11 @@ func (gw *Gateway) setupGlobals() { gw.SetConfig(Conf) mainLog.Debug("Setting up analytics DB connection") - analyticsStore := storage.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} + analyticsStore := redisCluster.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} gw.Analytics.Store = &analyticsStore gw.Analytics.Init() - store := storage.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} redisPurger := RedisPurger{Store: &store, Gw: gw} go redisPurger.PurgeLoop(gw.ctx) @@ -382,7 +384,7 @@ func (gw *Gateway) setupGlobals() { } else { mainLog.Debug("Using RPC cache purge") - store := storage.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} + store := redisCluster.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} purger := rpc.Purger{ Store: &store, } @@ -402,7 +404,7 @@ func (gw *Gateway) setupGlobals() { // Get the notifier ready mainLog.Debug("Notifier will not work in hybrid mode") - mainNotifierStore := &storage.RedisCluster{ConnectionHandler: gw.StorageConnectionHandler} + mainNotifierStore := &redisCluster.RedisCluster{ConnectionHandler: gw.StorageConnectionHandler} mainNotifierStore.Connect() gw.MainNotifier = RedisNotifier{mainNotifierStore, RedisPubSubChannel, gw} @@ -426,7 +428,7 @@ func (gw *Gateway) setupGlobals() { certificateSecret = 
gw.GetConfig().Security.PrivateCertificateEncodingSecret } - storeCert := &storage.RedisCluster{KeyPrefix: "cert-", HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler} + storeCert := &redisCluster.RedisCluster{KeyPrefix: "cert-", HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler} gw.CertificateManager = certs.NewCertificateManager(storeCert, certificateSecret, log, !gw.GetConfig().Cloud) if gw.GetConfig().SlaveOptions.UseRPC { rpcStore := &RPCStorageHandler{ @@ -750,7 +752,7 @@ func (gw *Gateway) addOAuthHandlers(spec *APISpec, muxer *mux.Router) *OAuthMana osinStorage := &RedisOsinStorageInterface{ storageManager, gw.GlobalSessionManager, - &storage.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, + &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, spec.OrgID, gw, } @@ -942,7 +944,7 @@ func (gw *Gateway) createResponseMiddlewareChain(spec *APISpec, responseFuncs [] } keyPrefix := "cache-" + spec.APIID - cacheStore := &storage.RedisCluster{KeyPrefix: keyPrefix, IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} + cacheStore := &redisCluster.RedisCluster{KeyPrefix: keyPrefix, IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} cacheStore.Connect() // Add cache writer as the final step of the response middleware chain @@ -1564,12 +1566,12 @@ func (gw *Gateway) getHostDetails(file string) { } } -func (gw *Gateway) getGlobalMDCBStorageHandler(keyPrefix string, hashKeys bool) storage.Handler { - localStorage := &storage.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys, ConnectionHandler: gw.StorageConnectionHandler} +func (gw *Gateway) getGlobalMDCBStorageHandler(keyPrefix string, hashKeys bool) interfaces.Handler { + localStorage := &redisCluster.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys, ConnectionHandler: gw.StorageConnectionHandler} logger := logrus.New().WithFields(logrus.Fields{"prefix": 
"mdcb-storage-handler"}) if gw.GetConfig().SlaveOptions.UseRPC { - return storage.NewMdcbStorage( + return mdcb.NewMdcbStorage( localStorage, &RPCStorageHandler{ KeyPrefix: keyPrefix, @@ -1582,7 +1584,7 @@ func (gw *Gateway) getGlobalMDCBStorageHandler(keyPrefix string, hashKeys bool) return localStorage } -func (gw *Gateway) getGlobalStorageHandler(keyPrefix string, hashKeys bool) storage.Handler { +func (gw *Gateway) getGlobalStorageHandler(keyPrefix string, hashKeys bool) interfaces.Handler { if gw.GetConfig().SlaveOptions.UseRPC { return &RPCStorageHandler{ KeyPrefix: keyPrefix, @@ -1590,7 +1592,7 @@ func (gw *Gateway) getGlobalStorageHandler(keyPrefix string, hashKeys bool) stor Gw: gw, } } - return &storage.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys, ConnectionHandler: gw.StorageConnectionHandler} + return &redisCluster.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys, ConnectionHandler: gw.StorageConnectionHandler} } func Start() { diff --git a/gateway/session_manager.go b/gateway/session_manager.go index 31099ea6039..e2eaa0cbc46 100644 --- a/gateway/session_manager.go +++ b/gateway/session_manager.go @@ -12,9 +12,10 @@ import ( "github.com/TykTechnologies/leakybucket/memorycache" "github.com/TykTechnologies/tyk/config" + "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/rate" "github.com/TykTechnologies/tyk/internal/redis" - "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/user" ) @@ -226,7 +227,7 @@ func (sfr sessionFailReason) String() string { // sessionFailReason if session limits have been exceeded. // Key values to manage rate are Rate and Per, e.g. 
Rate of 10 messages // Per 10 seconds -func (l *SessionLimiter) ForwardMessage(r *http.Request, session *user.SessionState, rateLimitKey string, quotaKey string, store storage.Handler, enableRL, enableQ bool, api *APISpec, dryRun bool) sessionFailReason { +func (l *SessionLimiter) ForwardMessage(r *http.Request, session *user.SessionState, rateLimitKey string, quotaKey string, store interfaces.Handler, enableRL, enableQ bool, api *APISpec, dryRun bool) sessionFailReason { // check for limit on API level (set to session by ApplyPolicies) accessDef, allowanceScope, err := GetAccessDefinitionByAPIIDOrSession(session, api) if err != nil { @@ -309,7 +310,7 @@ func (l *SessionLimiter) ForwardMessage(r *http.Request, session *user.SessionSt } -func (l *SessionLimiter) RedisQuotaExceeded(r *http.Request, session *user.SessionState, quotaKey, scope string, limit *user.APILimit, store storage.Handler, hashKeys bool) bool { +func (l *SessionLimiter) RedisQuotaExceeded(r *http.Request, session *user.SessionState, quotaKey, scope string, limit *user.APILimit, store interfaces.Handler, hashKeys bool) bool { // Unlimited? 
if limit.QuotaMax == -1 || limit.QuotaMax == 0 { // No quota set @@ -326,7 +327,7 @@ func (l *SessionLimiter) RedisQuotaExceeded(r *http.Request, session *user.Sessi key := session.KeyID if hashKeys { - key = storage.HashStr(session.KeyID) + key = util.HashStr(session.KeyID) } if quotaKey != "" { diff --git a/gateway/testutil.go b/gateway/testutil.go index 9e245f86987..60b40ca5763 100644 --- a/gateway/testutil.go +++ b/gateway/testutil.go @@ -28,6 +28,7 @@ import ( "github.com/getkin/kin-openapi/openapi3" "github.com/TykTechnologies/tyk/apidef/oas" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/rpc" @@ -46,7 +47,6 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/cli" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/storage" _ "github.com/TykTechnologies/tyk/templates" // Don't delete "github.com/TykTechnologies/tyk/test" _ "github.com/TykTechnologies/tyk/testdata" // Don't delete @@ -832,7 +832,7 @@ func CreateSession(gw *Gateway, sGen ...func(s *user.SessionState)) string { } hashKeys := gw.GetConfig().HashKeys - hashedKey := storage.HashKey(key, hashKeys) + hashedKey := util.HashKey(key, hashKeys) err := gw.GlobalSessionManager.UpdateSession(hashedKey, session, 60, hashKeys) if err != nil { log.WithError(err).Error("updating session.") @@ -1142,7 +1142,7 @@ func (s *Test) newGateway(genConf func(globalConf *config.Config)) *Gateway { gwConfig.AnalyticsConfig.GeoIPDBLocation = filepath.Join(rootPath, "testdata", "MaxMind-DB-test-ipv4-24.mmdb") gwConfig.EnableJSVM = true - gwConfig.HashKeyFunction = storage.HashMurmur64 + gwConfig.HashKeyFunction = util.HashMurmur64 gwConfig.Monitor.EnableTriggerMonitors = true gwConfig.AnalyticsConfig.NormaliseUrls.Enabled = true gwConfig.AllowInsecureConfigs = true diff --git a/interfaces/storage.go b/interfaces/storage.go new file mode 100644 index 00000000000..8b9fcaa4a61 --- /dev/null +++ b/interfaces/storage.go @@ -0,0 +1,38 @@ 
+package interfaces + +// Handler is a standard interface to a storage backend, used by +// AuthorisationManager to read and write key values to the backend +type Handler interface { + GetKey(string) (string, error) // Returned string is expected to be a JSON object (user.SessionState) + GetMultiKey([]string) ([]string, error) + GetRawKey(string) (string, error) + SetKey(string, string, int64) error // Second input string is expected to be a JSON object (user.SessionState) + SetRawKey(string, string, int64) error + SetExp(string, int64) error // Set key expiration + GetExp(string) (int64, error) // Returns expiry of a key + GetKeys(string) []string + DeleteKey(string) bool + DeleteAllKeys() bool + DeleteRawKey(string) bool + Connect() bool + GetKeysAndValues() map[string]string + GetKeysAndValuesWithFilter(string) map[string]string + DeleteKeys([]string) bool + Decrement(string) + IncrememntWithExpire(string, int64) int64 + SetRollingWindow(key string, per int64, val string, pipeline bool) (int, []interface{}) + GetRollingWindow(key string, per int64, pipeline bool) (int, []interface{}) + GetSet(string) (map[string]string, error) + AddToSet(string, string) + GetAndDeleteSet(string) []interface{} + RemoveFromSet(string, string) + DeleteScanMatch(string) bool + GetKeyPrefix() string + AddToSortedSet(string, string, float64) + GetSortedSetRange(string, string, string) ([]string, []float64, error) + RemoveSortedSetRange(string, string, string) error + GetListRange(string, int64, int64) ([]string, error) + RemoveFromList(string, string) error + AppendToSet(string, string) + Exists(string) (bool, error) +} diff --git a/internal/rate/sliding_log.go b/internal/rate/sliding_log.go index 8d0c89c1b9b..8b86547d147 100644 --- a/internal/rate/sliding_log.go +++ b/internal/rate/sliding_log.go @@ -26,7 +26,7 @@ type SlidingLog struct { // ErrRedisClientProvider is returned if NewSlidingLog isn't passed a valid RedisClientProvider parameter. 
var ErrRedisClientProvider = errors.New("Client doesn't implement RedisClientProvider") -// NewSlidingLog creates a new SlidingLog instance with a storage.Handler. In case +// NewSlidingLog creates a new SlidingLog instance with a interfaces.Handler. In case // the storage is offline, it's expected to return nil and an error to handle. func NewSlidingLog(client interface{}, pipeline bool, smoothingFn SmoothingFn) (*SlidingLog, error) { cluster, ok := client.(model.RedisClientProvider) diff --git a/internal/rate/sliding_log_test.go b/internal/rate/sliding_log_test.go index af6a1f21dbb..c6f1be5f890 100644 --- a/internal/rate/sliding_log_test.go +++ b/internal/rate/sliding_log_test.go @@ -9,11 +9,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/TykTechnologies/tyk/internal/redis" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/internal/rate" "github.com/TykTechnologies/tyk/internal/uuid" - "github.com/TykTechnologies/tyk/storage" ) // TestSlidingLog_Do is an integration test that tests counter behaviour. 
@@ -25,7 +25,7 @@ func TestSlidingLog_Do(t *testing.T) { conf, err := config.New() assert.NoError(t, err) - conn, err := storage.NewConnector(storage.DefaultConn, *conf) + conn, err := redisCluster.NewConnector(redisCluster.DefaultConn, *conf) assert.Nil(t, err) var db redis.UniversalClient @@ -72,7 +72,7 @@ func TestSlidingLog_GetCount(t *testing.T) { conf, err := config.New() assert.NoError(t, err) - conn, err := storage.NewConnector(storage.DefaultConn, *conf) + conn, err := redisCluster.NewConnector(redisCluster.DefaultConn, *conf) assert.Nil(t, err) var db redis.UniversalClient @@ -93,7 +93,7 @@ func TestSlidingLog_Get(t *testing.T) { conf, err := config.New() assert.NoError(t, err) - conn, err := storage.NewConnector(storage.DefaultConn, *conf) + conn, err := redisCluster.NewConnector(redisCluster.DefaultConn, *conf) assert.Nil(t, err) var db redis.UniversalClient @@ -114,7 +114,7 @@ func TestSlidingLog_pipelinerError(t *testing.T) { conf, err := config.New() assert.NoError(t, err) - rc := storage.NewConnectionHandler(ctx) + rc := redisCluster.NewConnectionHandler(ctx) go rc.Connect(ctx, nil, conf) timeout, cancel := context.WithTimeout(ctx, time.Second) @@ -125,7 +125,7 @@ func TestSlidingLog_pipelinerError(t *testing.T) { panic("can't connect to redis '" + conf.Storage.Host + "', timeout") } - rl, err := rate.NewSlidingLog(&storage.RedisCluster{KeyPrefix: "test-cluster", ConnectionHandler: rc}, false, nil) + rl, err := rate.NewSlidingLog(&redisCluster.RedisCluster{KeyPrefix: "test-cluster", ConnectionHandler: rc}, false, nil) assert.NoError(t, err) rl.PipelineFn = func(context.Context, func(redis.Pipeliner) error) error { @@ -216,7 +216,7 @@ func BenchmarkSlidingLog_New(b *testing.B) { conf, err := config.New() assert.NoError(b, err) - conn, err := storage.NewConnector(storage.DefaultConn, *conf) + conn, err := redisCluster.NewConnector(redisCluster.DefaultConn, *conf) assert.Nil(b, err) var db redis.UniversalClient @@ -237,7 +237,7 @@ func 
BenchmarkSlidingLog_Count(b *testing.B) { conf, err := config.New() assert.NoError(b, err) - conn, err := storage.NewConnector(storage.DefaultConn, *conf) + conn, err := redisCluster.NewConnector(redisCluster.DefaultConn, *conf) assert.Nil(b, err) var db redis.UniversalClient diff --git a/rpc/rpc_analytics_purger.go b/rpc/rpc_analytics_purger.go index ecb19c2e231..149600eb70e 100644 --- a/rpc/rpc_analytics_purger.go +++ b/rpc/rpc_analytics_purger.go @@ -7,10 +7,9 @@ import ( "time" "github.com/TykTechnologies/tyk-pump/analytics" + "github.com/TykTechnologies/tyk/interfaces" "github.com/vmihailenco/msgpack" - - "github.com/TykTechnologies/tyk/storage" ) const ANALYTICS_KEYNAME = "tyk-system-analytics" @@ -18,7 +17,7 @@ const ANALYTICS_KEYNAME = "tyk-system-analytics" // RPCPurger will purge analytics data into a Mongo database, requires that the Mongo DB string is specified // in the Config object type Purger struct { - Store storage.Handler + Store interfaces.Handler } // Connect Connects to RPC diff --git a/rpc/synchronization_forcer.go b/rpc/synchronization_forcer.go index a391d0f332f..8aa2889eb15 100644 --- a/rpc/synchronization_forcer.go +++ b/rpc/synchronization_forcer.go @@ -4,19 +4,20 @@ import ( "errors" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage/shared" ) type SyncronizerForcer struct { - store *storage.RedisCluster + store *redisCluster.RedisCluster getNodeDataFunc func() []byte } // NewSyncForcer returns a new syncforcer with a connected redis with a key prefix synchronizer-group- for group synchronization control. 
-func NewSyncForcer(controller *storage.ConnectionHandler, getNodeDataFunc func() []byte) *SyncronizerForcer { +func NewSyncForcer(controller *redisCluster.ConnectionHandler, getNodeDataFunc func() []byte) *SyncronizerForcer { sf := &SyncronizerForcer{} sf.getNodeDataFunc = getNodeDataFunc - sf.store = &storage.RedisCluster{KeyPrefix: "synchronizer-group-", ConnectionHandler: controller} + sf.store = &redisCluster.RedisCluster{KeyPrefix: "synchronizer-group-", ConnectionHandler: controller} sf.store.Connect() return sf @@ -28,7 +29,7 @@ func (sf *SyncronizerForcer) GroupLoginCallback(userKey string, groupID string) shouldForce := false _, err := sf.store.GetKey(groupID) - if err != nil && errors.Is(err, storage.ErrKeyNotFound) { + if err != nil && errors.Is(err, shared.ErrKeyNotFound) { shouldForce = true err = sf.store.SetKey(groupID, "", 0) diff --git a/rpc/synchronization_forcer_test.go b/rpc/synchronization_forcer_test.go index 0d9053ffa06..dd8f0c140a7 100644 --- a/rpc/synchronization_forcer_test.go +++ b/rpc/synchronization_forcer_test.go @@ -10,10 +10,10 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/storage" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) -var rc *storage.ConnectionHandler +var rc *redisCluster.ConnectionHandler func TestMain(m *testing.M) { conf, err := config.New() @@ -21,7 +21,7 @@ func TestMain(m *testing.M) { panic(err) } - rc = storage.NewConnectionHandler(context.Background()) + rc = redisCluster.NewConnectionHandler(context.Background()) go rc.Connect(context.Background(), nil, conf) timeout, cancel := context.WithTimeout(context.Background(), time.Second) diff --git a/storage/dummy.go b/storage/dummy/dummy.go similarity index 99% rename from storage/dummy.go rename to storage/dummy/dummy.go index a2d4ff6083d..12cd8ae4a3d 100644 --- a/storage/dummy.go +++ b/storage/dummy/dummy.go @@ -1,4 +1,4 @@ -package storage +package dummy 
import ( "errors" diff --git a/storage/dummy_test.go b/storage/dummy/dummy_test.go similarity index 99% rename from storage/dummy_test.go rename to storage/dummy/dummy_test.go index 8814de1a598..8b96fd867ab 100644 --- a/storage/dummy_test.go +++ b/storage/dummy/dummy_test.go @@ -1,4 +1,4 @@ -package storage +package dummy import ( "reflect" diff --git a/storage/mdcb_storage.go b/storage/mdcb/mdcb_storage.go similarity index 95% rename from storage/mdcb_storage.go rename to storage/mdcb/mdcb_storage.go index b135bfcdec4..9a7146a234d 100644 --- a/storage/mdcb_storage.go +++ b/storage/mdcb/mdcb_storage.go @@ -1,20 +1,21 @@ -package storage +package mdcb import ( "errors" "strings" + "github.com/TykTechnologies/tyk/interfaces" "github.com/sirupsen/logrus" ) type MdcbStorage struct { - local Handler - rpc Handler + local interfaces.Handler + rpc interfaces.Handler logger *logrus.Entry CallbackonPullfromRPC *func(key string, val string) error } -func NewMdcbStorage(local, rpc Handler, log *logrus.Entry) *MdcbStorage { +func NewMdcbStorage(local, rpc interfaces.Handler, log *logrus.Entry) *MdcbStorage { return &MdcbStorage{ local: local, rpc: rpc, diff --git a/storage/mdcb_storage_test.go b/storage/mdcb/mdcb_storage_test.go similarity index 92% rename from storage/mdcb_storage_test.go rename to storage/mdcb/mdcb_storage_test.go index 0d12bc29940..39691f6d7f7 100644 --- a/storage/mdcb_storage_test.go +++ b/storage/mdcb/mdcb_storage_test.go @@ -1,10 +1,11 @@ -package storage +package mdcb import ( "context" "io" "testing" + "github.com/TykTechnologies/tyk/storage/dummy" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) @@ -31,13 +32,13 @@ func TestGetResourceType(t *testing.T) { } func TestMdcbStorage_GetMultiKey(t *testing.T) { - rpcHandler := NewDummyStorage() + rpcHandler := dummy.NewDummyStorage() err := rpcHandler.SetKey("key1", "1", 0) if err != nil { t.Error(err.Error()) } - localHandler := NewDummyStorage() + localHandler := dummy.NewDummyStorage() 
err = localHandler.SetKey("key2", "1", 0) if err != nil { t.Error(err.Error()) diff --git a/storage/connection_handler.go b/storage/redis-cluster/connection_handler.go similarity index 99% rename from storage/connection_handler.go rename to storage/redis-cluster/connection_handler.go index de22cd7d05b..c2d4f220427 100644 --- a/storage/connection_handler.go +++ b/storage/redis-cluster/connection_handler.go @@ -1,4 +1,4 @@ -package storage +package redisCluster import ( "context" diff --git a/storage/connection_handler_test.go b/storage/redis-cluster/connection_handler_test.go similarity index 99% rename from storage/connection_handler_test.go rename to storage/redis-cluster/connection_handler_test.go index fcc325686d1..3a2b01f1fb9 100644 --- a/storage/connection_handler_test.go +++ b/storage/redis-cluster/connection_handler_test.go @@ -1,4 +1,4 @@ -package storage +package redisCluster import ( "context" diff --git a/storage/redis_cluster.go b/storage/redis-cluster/redis_cluster.go similarity index 98% rename from storage/redis_cluster.go rename to storage/redis-cluster/redis_cluster.go index afce41ea274..1a5f9e81706 100644 --- a/storage/redis_cluster.go +++ b/storage/redis-cluster/redis_cluster.go @@ -1,4 +1,4 @@ -package storage +package redisCluster import ( "context" @@ -21,8 +21,11 @@ import ( tempset "github.com/TykTechnologies/storage/temporal/set" tempsortedset "github.com/TykTechnologies/storage/temporal/sortedset" redis "github.com/TykTechnologies/tyk/internal/redis" + "github.com/TykTechnologies/tyk/storage/shared" + "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/config" + logger "github.com/TykTechnologies/tyk/log" ) var ( @@ -31,6 +34,8 @@ var ( // ErrStorageConn is returned when we can't get a connection from the ConnectionHandler ErrStorageConn = fmt.Errorf("Error trying to get singleton instance: %w", ErrRedisIsDown) + + log = logger.Get() ) // RedisCluster is a storage manager that uses the redis database. 
@@ -246,7 +251,7 @@ func (r *RedisCluster) hashKey(in string) string { // Not hashing? Return the raw key return in } - return HashStr(in) + return util.HashStr(in) } func (r *RedisCluster) fixKey(keyName string) string { @@ -275,7 +280,7 @@ func (r *RedisCluster) GetKey(keyName string) (string, error) { value, err := storage.Get(context.Background(), r.fixKey(keyName)) if err != nil { log.Debug("Error trying to get value:", err) - return "", ErrKeyNotFound + return "", shared.ErrKeyNotFound } return value, nil @@ -298,7 +303,7 @@ func (r *RedisCluster) GetMultiKey(keys []string) ([]string, error) { values, err := storage.GetMulti(context.Background(), keyNames) if err != nil { log.WithError(err).Debug("Error trying to get value") - return nil, ErrKeyNotFound + return nil, shared.ErrKeyNotFound } result := make([]string, 0) for _, val := range values { @@ -315,7 +320,7 @@ func (r *RedisCluster) GetMultiKey(keys []string) ([]string, error) { } } - return nil, ErrKeyNotFound + return nil, shared.ErrKeyNotFound } func (r *RedisCluster) GetKeyTTL(keyName string) (ttl int64, err error) { @@ -338,7 +343,7 @@ func (r *RedisCluster) GetRawKey(keyName string) (string, error) { value, err := storage.Get(context.Background(), keyName) if err != nil { log.Debug("Error trying to get value:", err) - return "", ErrKeyNotFound + return "", shared.ErrKeyNotFound } return value, nil diff --git a/storage/redis_cluster_test.go b/storage/redis-cluster/redis_cluster_test.go similarity index 98% rename from storage/redis_cluster_test.go rename to storage/redis-cluster/redis_cluster_test.go index fa75f74de60..1f63c94079e 100644 --- a/storage/redis_cluster_test.go +++ b/storage/redis-cluster/redis_cluster_test.go @@ -1,4 +1,4 @@ -package storage +package redisCluster import ( "context" @@ -13,6 +13,7 @@ import ( tempmocks "github.com/TykTechnologies/storage/temporal/tempmocks" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/internal/redis" + 
"github.com/TykTechnologies/tyk/storage/shared" "github.com/stretchr/testify/mock" ) @@ -116,8 +117,8 @@ func TestRedisClusterGetMultiKey(t *testing.T) { r.DeleteAllKeys() _, err := r.GetMultiKey(keys) - if !errors.Is(err, ErrKeyNotFound) { - t.Errorf("expected %v got %v", ErrKeyNotFound, err) + if !errors.Is(err, shared.ErrKeyNotFound) { + t.Errorf("expected %v got %v", shared.ErrKeyNotFound, err) } err = r.SetKey(keys[0], keys[0], 0) if err != nil { @@ -408,7 +409,7 @@ func TestGetKey(t *testing.T) { val, err := storage.GetKey("key") assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) assert.Equal(t, "", val) mockKv.AssertExpectations(t) }) @@ -458,7 +459,7 @@ func TestGetMultiKey(t *testing.T) { val, err := storage.GetMultiKey([]string{"key"}) assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) assert.Equal(t, []string(nil), val) mockKv.AssertExpectations(t) }) @@ -503,11 +504,11 @@ func TestGetKeyTTL(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("TTL", mock.Anything, "key").Return(int64(0), ErrKeyNotFound) + mockKv.On("TTL", mock.Anything, "key").Return(int64(0), shared.ErrKeyNotFound) val, err := storage.GetKeyTTL("key") assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) assert.Equal(t, int64(0), val) mockKv.AssertExpectations(t) }) @@ -541,11 +542,11 @@ func TestGetRawKey(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Get", mock.Anything, "key").Return("", ErrKeyNotFound) + mockKv.On("Get", mock.Anything, "key").Return("", shared.ErrKeyNotFound) val, err := storage.GetRawKey("key") assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) assert.Equal(t, "", val) 
mockKv.AssertExpectations(t) }) @@ -590,11 +591,11 @@ func TestGetExp(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("TTL", mock.Anything, "key").Return(int64(0), ErrKeyNotFound) + mockKv.On("TTL", mock.Anything, "key").Return(int64(0), shared.ErrKeyNotFound) val, err := storage.GetExp("key") assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) assert.Equal(t, int64(0), val) mockKv.AssertExpectations(t) }) @@ -637,11 +638,11 @@ func TestSetExp(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Expire", mock.Anything, "key", time.Duration(-1*time.Second)).Return(ErrKeyNotFound) + mockKv.On("Expire", mock.Anything, "key", time.Duration(-1*time.Second)).Return(shared.ErrKeyNotFound) err := storage.SetExp("key", -1) assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) mockKv.AssertExpectations(t) }) } @@ -683,11 +684,11 @@ func TestSetKey(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Set", mock.Anything, "key", "value", time.Duration(-1*time.Second)).Return(ErrKeyNotFound) + mockKv.On("Set", mock.Anything, "key", "value", time.Duration(-1*time.Second)).Return(shared.ErrKeyNotFound) err := storage.SetKey("key", "value", -1) assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) mockKv.AssertExpectations(t) }) } @@ -729,11 +730,11 @@ func TestSetRawKey(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Set", mock.Anything, "key", "value", time.Duration(-1*time.Second)).Return(ErrKeyNotFound) + mockKv.On("Set", mock.Anything, "key", "value", 
time.Duration(-1*time.Second)).Return(shared.ErrKeyNotFound) err := storage.SetRawKey("key", "value", -1) assert.Error(t, err) - assert.Equal(t, ErrKeyNotFound, err) + assert.Equal(t, shared.ErrKeyNotFound, err) mockKv.AssertExpectations(t) }) } @@ -771,7 +772,7 @@ func TestDecrement(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Decrement", mock.Anything, "key").Return(int64(1), ErrKeyNotFound) + mockKv.On("Decrement", mock.Anything, "key").Return(int64(1), shared.ErrKeyNotFound) storage.Decrement("key") mockKv.AssertExpectations(t) @@ -837,7 +838,7 @@ func TestIncrememntWithExpire(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Increment", mock.Anything, "key").Return(int64(0), ErrKeyNotFound) + mockKv.On("Increment", mock.Anything, "key").Return(int64(0), shared.ErrKeyNotFound) val := storage.IncrememntWithExpire("key", 0) assert.Equal(t, int64(0), val) @@ -947,7 +948,7 @@ func TestGetKeysAndValuesWithFilter(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("GetKeysAndValuesWithFilter", mock.Anything, "test").Return(map[string]interface{}{}, ErrKeyNotFound) + mockKv.On("GetKeysAndValuesWithFilter", mock.Anything, "test").Return(map[string]interface{}{}, shared.ErrKeyNotFound) res := storage.GetKeysAndValuesWithFilter("test") assert.Equal(t, map[string]string(map[string]string(nil)), res) @@ -982,7 +983,7 @@ func TestGetKeysAndValues(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("GetKeysAndValuesWithFilter", mock.Anything, "").Return(map[string]interface{}{}, ErrKeyNotFound) + mockKv.On("GetKeysAndValuesWithFilter", mock.Anything, "").Return(map[string]interface{}{}, shared.ErrKeyNotFound) res := 
storage.GetKeysAndValues() assert.Equal(t, map[string]string(map[string]string(nil)), res) @@ -1121,7 +1122,7 @@ func TestDeleteRawKey(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("Delete", mock.Anything, "key").Return(ErrKeyNotFound) + mockKv.On("Delete", mock.Anything, "key").Return(shared.ErrKeyNotFound) deleted := storage.DeleteRawKey("key") assert.False(t, deleted) @@ -1166,7 +1167,7 @@ func TestDeleteScanMatch(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("DeleteScanMatch", mock.Anything, "key").Return(int64(0), ErrKeyNotFound) + mockKv.On("DeleteScanMatch", mock.Anything, "key").Return(int64(0), shared.ErrKeyNotFound) deleted := storage.DeleteScanMatch("key") assert.False(t, deleted) @@ -1222,7 +1223,7 @@ func TestDeleteKeys(t *testing.T) { storage := &RedisCluster{ConnectionHandler: rc} mockKv := tempmocks.NewKeyValue(t) storage.kvStorage = mockKv - mockKv.On("DeleteKeys", mock.Anything, mock.Anything).Return(int64(0), ErrKeyNotFound) + mockKv.On("DeleteKeys", mock.Anything, mock.Anything).Return(int64(0), shared.ErrKeyNotFound) deleted := storage.DeleteKeys([]string{"key"}) assert.False(t, deleted) diff --git a/storage/redis_shim.go b/storage/redis-cluster/redis_shim.go similarity index 98% rename from storage/redis_shim.go rename to storage/redis-cluster/redis_shim.go index bdd168ee203..bdb9ad34579 100644 --- a/storage/redis_shim.go +++ b/storage/redis-cluster/redis_shim.go @@ -1,4 +1,4 @@ -package storage +package redisCluster import ( "context" diff --git a/storage/redis_shim_test.go b/storage/redis-cluster/redis_shim_test.go similarity index 98% rename from storage/redis_shim_test.go rename to storage/redis-cluster/redis_shim_test.go index 4bc2c3480e0..50a72080455 100644 --- a/storage/redis_shim_test.go +++ b/storage/redis-cluster/redis_shim_test.go @@ -1,4 +1,4 @@ 
-package storage +package redisCluster import ( "context" diff --git a/storage/shared/errors.go b/storage/shared/errors.go new file mode 100644 index 00000000000..e76bcd30fd4 --- /dev/null +++ b/storage/shared/errors.go @@ -0,0 +1,7 @@ +package shared + +import "errors" + +// ErrKeyNotFound is a standard error for when a key is not found in the storage engine +var ErrKeyNotFound = errors.New("key not found") +var ErrMDCBConnectionLost = errors.New("mdcb connection is lost") diff --git a/storage/storage.go b/storage/storage.go index 4da4d8dec66..3f2bcccd79b 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -1,67 +1,17 @@ package storage import ( - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "errors" "fmt" - "hash" - "strings" - "github.com/buger/jsonparser" - - "github.com/TykTechnologies/murmur3" - logger "github.com/TykTechnologies/tyk/log" - - "github.com/TykTechnologies/tyk/internal/uuid" + "github.com/TykTechnologies/tyk/interfaces" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) -var log = logger.Get() - -// ErrKeyNotFound is a standard error for when a key is not found in the storage engine -var ErrKeyNotFound = errors.New("key not found") - -var ErrMDCBConnectionLost = errors.New("mdcb connection is lost") - -const MongoBsonIdLength = 24 - -// Handler is a standard interface to a storage backend, used by -// AuthorisationManager to read and write key values to the backend -type Handler interface { - GetKey(string) (string, error) // Returned string is expected to be a JSON object (user.SessionState) - GetMultiKey([]string) ([]string, error) - GetRawKey(string) (string, error) - SetKey(string, string, int64) error // Second input string is expected to be a JSON object (user.SessionState) - SetRawKey(string, string, int64) error - SetExp(string, int64) error // Set key expiration - GetExp(string) (int64, error) // Returns expiry of a key - GetKeys(string) []string - DeleteKey(string) bool - DeleteAllKeys() bool - 
DeleteRawKey(string) bool - Connect() bool - GetKeysAndValues() map[string]string - GetKeysAndValuesWithFilter(string) map[string]string - DeleteKeys([]string) bool - Decrement(string) - IncrememntWithExpire(string, int64) int64 - SetRollingWindow(key string, per int64, val string, pipeline bool) (int, []interface{}) - GetRollingWindow(key string, per int64, pipeline bool) (int, []interface{}) - GetSet(string) (map[string]string, error) - AddToSet(string, string) - GetAndDeleteSet(string) []interface{} - RemoveFromSet(string, string) - DeleteScanMatch(string) bool - GetKeyPrefix() string - AddToSortedSet(string, string, float64) - GetSortedSetRange(string, string, string) ([]string, []float64, error) - RemoveSortedSetRange(string, string, string) error - GetListRange(string, int64, int64) ([]string, error) - RemoveFromList(string, string) error - AppendToSet(string, string) - Exists(string) (bool, error) -} +const ( + REDIS_CLUSTER = "redis" + MDCB = "mdcb" + DUMMY = "dummy" +) type AnalyticsHandler interface { Connect() bool @@ -71,114 +21,47 @@ type AnalyticsHandler interface { GetExp(string) (int64, error) // Returns expiry of a key } -const defaultHashAlgorithm = "murmur64" - -// If hashing algorithm is empty, use legacy key generation -func GenerateToken(orgID, keyID, hashAlgorithm string) (string, error) { - if keyID == "" { - keyID = uuid.NewHex() - } - - if hashAlgorithm != "" { - _, err := hashFunction(hashAlgorithm) - if err != nil { - hashAlgorithm = defaultHashAlgorithm +func WithKeyPrefix(prefix string) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*redisCluster.RedisCluster); ok { + impl.KeyPrefix = prefix } - - jsonToken := fmt.Sprintf(`{"org":"%s","id":"%s","h":"%s"}`, orgID, keyID, hashAlgorithm) - return base64.StdEncoding.EncodeToString([]byte(jsonToken)), err } - - // Legacy keys - return orgID + keyID, nil } -// `{"` in base64 -const B64JSONPrefix = 
"ey" - -func TokenHashAlgo(token string) string { - // Legacy tokens not b64 and not JSON records - if strings.HasPrefix(token, B64JSONPrefix) { - if jsonToken, err := base64.StdEncoding.DecodeString(token); err == nil { - hashAlgo, _ := jsonparser.GetString(jsonToken, "h") - return hashAlgo +func WithHashKeys(hashKeys bool) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*redisCluster.RedisCluster); ok { + impl.HashKeys = hashKeys } } - - return "" -} - -// TODO: add checks -func TokenID(token string) (id string, err error) { - jsonToken, err := base64.StdEncoding.DecodeString(token) - if err != nil { - return "", err - } - - return jsonparser.GetString(jsonToken, "id") } -func TokenOrg(token string) string { - if strings.HasPrefix(token, B64JSONPrefix) { - if jsonToken, err := base64.StdEncoding.DecodeString(token); err == nil { - // Checking error in case if it is a legacy tooken which just by accided has the same b64JSON prefix - if org, err := jsonparser.GetString(jsonToken, "org"); err == nil { - return org - } - } - } - - // 24 is mongo bson id length - if len(token) > MongoBsonIdLength { - newToken := token[:MongoBsonIdLength] - _, err := hex.DecodeString(newToken) - if err == nil { - return newToken +func WithConnectionhandler(handler *redisCluster.ConnectionHandler) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*redisCluster.RedisCluster); ok { + impl.ConnectionHandler = handler } } - - return "" } -var ( - HashSha256 = "sha256" - HashMurmur32 = "murmur32" - HashMurmur64 = "murmur64" - HashMurmur128 = "murmur128" -) - -func hashFunction(algorithm string) (hash.Hash, error) { - switch algorithm { - case HashSha256: - return sha256.New(), nil - case HashMurmur64: - return murmur3.New64(), nil - case HashMurmur128: - return murmur3.New128(), nil - case "", HashMurmur32: - 
return murmur3.New32(), nil +func NewStorageHandler(name string, opts ...func(interfaces.Handler)) (interfaces.Handler, error) { + var impl interfaces.Handler + switch name { + case REDIS_CLUSTER: + impl = &redisCluster.RedisCluster{} + case MDCB: + return nil, fmt.Errorf("mdcb storage handler is not implemented") default: - return murmur3.New32(), fmt.Errorf("Unknown key hash function: %s. Falling back to murmur32.", algorithm) + return nil, fmt.Errorf("unknown storage handler: %s", name) } -} -func HashStr(in string, withAlg ...string) string { - var algo string - if len(withAlg) > 0 && withAlg[0] != "" { - algo = withAlg[0] - } else { - algo = TokenHashAlgo(in) + for _, opt := range opts { + opt(impl) } - h, _ := hashFunction(algo) - h.Write([]byte(in)) - return hex.EncodeToString(h.Sum(nil)) -} - -func HashKey(in string, hashKey bool) string { - if !hashKey { - // Not hashing? Return the raw key - return in - } - return HashStr(in) + return impl, nil } diff --git a/storage/storage_test.go b/storage/storage_test.go index ea105b74164..7aed39d7ed0 100644 --- a/storage/storage_test.go +++ b/storage/storage_test.go @@ -1,6 +1,10 @@ package storage -import "testing" +import ( + "testing" + + "github.com/TykTechnologies/tyk/storage/util" +) func Test_TokenOrg(t *testing.T) { tcs := []struct { @@ -32,7 +36,7 @@ func Test_TokenOrg(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - result := TokenOrg(tc.givenKey) + result := util.TokenOrg(tc.givenKey) if result != tc.expectedResult { t.Errorf("Expected %s, got %s", tc.expectedResult, result) } diff --git a/storage/util/util.go b/storage/util/util.go new file mode 100644 index 00000000000..636564f6bae --- /dev/null +++ b/storage/util/util.go @@ -0,0 +1,127 @@ +package util + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "strings" + + "github.com/TykTechnologies/murmur3" + "github.com/TykTechnologies/tyk/internal/uuid" + "github.com/buger/jsonparser" +) + 
+const defaultHashAlgorithm = "murmur64" +const MongoBsonIdLength = 24 + +// If hashing algorithm is empty, use legacy key generation +func GenerateToken(orgID, keyID, hashAlgorithm string) (string, error) { + if keyID == "" { + keyID = uuid.NewHex() + } + + if hashAlgorithm != "" { + _, err := hashFunction(hashAlgorithm) + if err != nil { + hashAlgorithm = defaultHashAlgorithm + } + + jsonToken := fmt.Sprintf(`{"org":"%s","id":"%s","h":"%s"}`, orgID, keyID, hashAlgorithm) + return base64.StdEncoding.EncodeToString([]byte(jsonToken)), err + } + + // Legacy keys + return orgID + keyID, nil +} + +// `{"` in base64 +const B64JSONPrefix = "ey" + +func TokenHashAlgo(token string) string { + // Legacy tokens not b64 and not JSON records + if strings.HasPrefix(token, B64JSONPrefix) { + if jsonToken, err := base64.StdEncoding.DecodeString(token); err == nil { + hashAlgo, _ := jsonparser.GetString(jsonToken, "h") + return hashAlgo + } + } + + return "" +} + +// TODO: add checks +func TokenID(token string) (id string, err error) { + jsonToken, err := base64.StdEncoding.DecodeString(token) + if err != nil { + return "", err + } + + return jsonparser.GetString(jsonToken, "id") +} + +func TokenOrg(token string) string { + if strings.HasPrefix(token, B64JSONPrefix) { + if jsonToken, err := base64.StdEncoding.DecodeString(token); err == nil { + // Checking error in case if it is a legacy token which just by accident has the same b64JSON prefix + if org, err := jsonparser.GetString(jsonToken, "org"); err == nil { + return org + } + } + } + + // 24 is mongo bson id length + if len(token) > MongoBsonIdLength { + newToken := token[:MongoBsonIdLength] + _, err := hex.DecodeString(newToken) + if err == nil { + return newToken + } + } + + return "" +} + +var ( + HashSha256 = "sha256" + HashMurmur32 = "murmur32" + HashMurmur64 = "murmur64" + HashMurmur128 = "murmur128" +) + +func hashFunction(algorithm string) (hash.Hash, error) { + switch algorithm { + case HashSha256: + return 
sha256.New(), nil + case HashMurmur64: + return murmur3.New64(), nil + case HashMurmur128: + return murmur3.New128(), nil + case "", HashMurmur32: + return murmur3.New32(), nil + default: + return murmur3.New32(), fmt.Errorf("Unknown key hash function: %s. Falling back to murmur32.", algorithm) + } +} + +func HashStr(in string, withAlg ...string) string { + var algo string + if len(withAlg) > 0 && withAlg[0] != "" { + algo = withAlg[0] + } else { + algo = TokenHashAlgo(in) + } + + h, _ := hashFunction(algo) + h.Write([]byte(in)) + return hex.EncodeToString(h.Sum(nil)) +} + +func HashKey(in string, hashKey bool) string { + if !hashKey { + // Not hashing? Return the raw key + return in + } + return HashStr(in) +} From ae1b0ddb3408d21b3504cb37a8257bf138fd631a Mon Sep 17 00:00:00 2001 From: Martin Buhr Date: Wed, 7 Aug 2024 13:45:00 +1200 Subject: [PATCH 2/4] added options constructors --- storage/mdcb/mdcb_storage.go | 76 ++++++++++++++++---------------- storage/mdcb/options.go | 34 ++++++++++++++ storage/redis-cluster/options.go | 58 ++++++++++++++++++++++++ storage/storage.go | 31 ++----------- 4 files changed, 133 insertions(+), 66 deletions(-) create mode 100644 storage/mdcb/options.go create mode 100644 storage/redis-cluster/options.go diff --git a/storage/mdcb/mdcb_storage.go b/storage/mdcb/mdcb_storage.go index 9a7146a234d..d95b049816d 100644 --- a/storage/mdcb/mdcb_storage.go +++ b/storage/mdcb/mdcb_storage.go @@ -9,17 +9,17 @@ import ( ) type MdcbStorage struct { - local interfaces.Handler - rpc interfaces.Handler - logger *logrus.Entry + Local interfaces.Handler + Rpc interfaces.Handler + Logger *logrus.Entry CallbackonPullfromRPC *func(key string, val string) error } func NewMdcbStorage(local, rpc interfaces.Handler, log *logrus.Entry) *MdcbStorage { return &MdcbStorage{ - local: local, - rpc: rpc, - logger: log, + Local: local, + Rpc: rpc, + Logger: log, } } @@ -27,25 +27,25 @@ func (m MdcbStorage) GetKey(key string) (string, error) { var val string var 
err error - if m.local == nil { - return m.rpc.GetKey(key) + if m.Local == nil { + return m.Rpc.GetKey(key) } - val, err = m.local.GetKey(key) + val, err = m.Local.GetKey(key) if err != nil { - m.logger.Infof("Retrieving key from rpc.") - val, err = m.rpc.GetKey(key) + m.Logger.Infof("Retrieving key from rpc.") + val, err = m.Rpc.GetKey(key) if err != nil { resourceType := getResourceType(key) - m.logger.Errorf("cannot retrieve %v from rpc: %v", resourceType, err.Error()) + m.Logger.Errorf("cannot retrieve %v from rpc: %v", resourceType, err.Error()) return val, err } if m.CallbackonPullfromRPC != nil { err := (*m.CallbackonPullfromRPC)(key, val) if err != nil { - m.logger.Error(err) + m.Logger.Error(err) } } } @@ -87,7 +87,7 @@ func (m MdcbStorage) GetRawKey(string) (string, error) { func (m MdcbStorage) SetKey(key string, content string, TTL int64) error { // only set the value locally as rpc writtes is not allowed - errLocal := m.local.SetKey(key, content, TTL) + errLocal := m.Local.SetKey(key, content, TTL) if errLocal != nil { return errors.New("cannot save key in local") @@ -111,20 +111,20 @@ func (m MdcbStorage) GetExp(string) (int64, error) { func (m MdcbStorage) GetKeys(key string) []string { var val []string - if m.local != nil { - val = m.local.GetKeys(key) + if m.Local != nil { + val = m.Local.GetKeys(key) if len(val) == 0 { - val = m.rpc.GetKeys(key) + val = m.Rpc.GetKeys(key) } } else { - val = m.rpc.GetKeys(key) + val = m.Rpc.GetKeys(key) } return val } func (m MdcbStorage) DeleteKey(key string) bool { - deleteLocal := m.local.DeleteKey(key) - deleteRPC := m.rpc.DeleteKey(key) + deleteLocal := m.Local.DeleteKey(key) + deleteRPC := m.Rpc.DeleteKey(key) return deleteLocal || deleteRPC } @@ -138,7 +138,7 @@ func (m MdcbStorage) DeleteRawKey(string) bool { } func (m MdcbStorage) Connect() bool { - return m.local.Connect() && m.rpc.Connect() + return m.Local.Connect() && m.Rpc.Connect() } func (m MdcbStorage) GetKeysAndValues() map[string]string { @@ 
-146,7 +146,7 @@ func (m MdcbStorage) GetKeysAndValues() map[string]string { } func (m MdcbStorage) GetKeysAndValuesWithFilter(key string) map[string]string { - return m.local.GetKeysAndValuesWithFilter(key) + return m.Local.GetKeysAndValuesWithFilter(key) } func (m MdcbStorage) DeleteKeys([]string) bool { @@ -170,16 +170,16 @@ func (m MdcbStorage) GetRollingWindow(key string, per int64, pipeline bool) (int } func (m MdcbStorage) GetSet(key string) (map[string]string, error) { - val, err := m.local.GetSet(key) + val, err := m.Local.GetSet(key) if err != nil { // try rpc - val, err = m.rpc.GetSet(key) + val, err = m.Rpc.GetSet(key) } return val, err } func (m MdcbStorage) AddToSet(key string, value string) { - m.local.AddToSet(key, value) + m.Local.AddToSet(key, value) } func (m MdcbStorage) GetAndDeleteSet(string) []interface{} { @@ -187,12 +187,12 @@ func (m MdcbStorage) GetAndDeleteSet(string) []interface{} { } func (m MdcbStorage) RemoveFromSet(key string, value string) { - m.local.RemoveFromSet(key, value) + m.Local.RemoveFromSet(key, value) } func (m MdcbStorage) DeleteScanMatch(key string) bool { - deleteLocal := m.local.DeleteScanMatch(key) - deleteRPC := m.rpc.DeleteScanMatch(key) + deleteLocal := m.Local.DeleteScanMatch(key) + deleteRPC := m.Rpc.DeleteScanMatch(key) return deleteLocal || deleteRPC } @@ -217,21 +217,21 @@ func (m MdcbStorage) GetListRange(key string, from int64, to int64) ([]string, e var val []string var err error - if m.local == nil { - return m.rpc.GetListRange(key, from, to) + if m.Local == nil { + return m.Rpc.GetListRange(key, from, to) } - val, err = m.local.GetListRange(key, from, to) + val, err = m.Local.GetListRange(key, from, to) if err != nil { - val, err = m.rpc.GetListRange(key, from, to) + val, err = m.Rpc.GetListRange(key, from, to) } return val, err } func (m MdcbStorage) RemoveFromList(key string, value string) error { - errLocal := m.local.RemoveFromList(key, value) - errRpc := m.rpc.RemoveFromList(key, value) + errLocal 
:= m.Local.RemoveFromList(key, value) + errRpc := m.Rpc.RemoveFromList(key, value) if errLocal != nil && errRpc != nil { return errors.New("cannot delete key in storages") @@ -241,13 +241,13 @@ func (m MdcbStorage) RemoveFromList(key string, value string) error { } func (m MdcbStorage) AppendToSet(key string, value string) { - m.local.AppendToSet(key, value) - m.rpc.AppendToSet(key, value) + m.Local.AppendToSet(key, value) + m.Rpc.AppendToSet(key, value) } func (m MdcbStorage) Exists(key string) (bool, error) { - foundLocal, errLocal := m.local.Exists(key) - foundRpc, errRpc := m.rpc.Exists(key) + foundLocal, errLocal := m.Local.Exists(key) + foundRpc, errRpc := m.Rpc.Exists(key) if errLocal != nil && errRpc != nil { return false, errors.New("cannot find key in storages") diff --git a/storage/mdcb/options.go b/storage/mdcb/options.go new file mode 100644 index 00000000000..3fd02542d78 --- /dev/null +++ b/storage/mdcb/options.go @@ -0,0 +1,34 @@ +package mdcb + +import ( + "github.com/TykTechnologies/tyk/interfaces" + "github.com/sirupsen/logrus" +) + +// MDCB Options +func WithLocalStorageHandler(handler interfaces.Handler) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*MdcbStorage); ok { + impl.Local = handler + } + } +} + +func WithRpcStorageHandler(handler interfaces.Handler) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*MdcbStorage); ok { + impl.Rpc = handler + } + } +} + +func WithLogger(logger *logrus.Entry) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*MdcbStorage); ok { + impl.Logger = logger + } + } +} diff --git a/storage/redis-cluster/options.go b/storage/redis-cluster/options.go new file mode 100644 index 00000000000..7c70d006b01 --- /dev/null +++ 
b/storage/redis-cluster/options.go @@ -0,0 +1,58 @@ +package redisCluster + +import "github.com/TykTechnologies/tyk/interfaces" + +// Redis Cluster Options +func WithKeyPrefix(prefix string) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*RedisCluster); ok { + impl.KeyPrefix = prefix + } + } +} + +func WithHashKeys(hashKeys bool) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*RedisCluster); ok { + impl.HashKeys = hashKeys + } + } +} + +func WithConnectionhandler(handler *ConnectionHandler) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*RedisCluster); ok { + impl.ConnectionHandler = handler + } + } +} + +func IsCache(cache bool) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*RedisCluster); ok { + impl.IsCache = cache + } + } +} + +func IsAnalytics(analytics bool) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*RedisCluster); ok { + impl.IsAnalytics = analytics + } + } +} + +func WithRedisController(controller *RedisController) func(interfaces.Handler) { + return func(impl interfaces.Handler) { + // Type assertion for more iplementations later + if impl, ok := impl.(*RedisCluster); ok { + impl.RedisController = controller + } + } +} diff --git a/storage/storage.go b/storage/storage.go index 3f2bcccd79b..afbc997cc0f 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/TykTechnologies/tyk/interfaces" + "github.com/TykTechnologies/tyk/storage/mdcb" redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) @@ -21,40 +22,14 @@ type AnalyticsHandler 
interface { GetExp(string) (int64, error) // Returns expiry of a key } -func WithKeyPrefix(prefix string) func(interfaces.Handler) { - return func(impl interfaces.Handler) { - // Type assertion for more iplementations later - if impl, ok := impl.(*redisCluster.RedisCluster); ok { - impl.KeyPrefix = prefix - } - } -} - -func WithHashKeys(hashKeys bool) func(interfaces.Handler) { - return func(impl interfaces.Handler) { - // Type assertion for more iplementations later - if impl, ok := impl.(*redisCluster.RedisCluster); ok { - impl.HashKeys = hashKeys - } - } -} - -func WithConnectionhandler(handler *redisCluster.ConnectionHandler) func(interfaces.Handler) { - return func(impl interfaces.Handler) { - // Type assertion for more iplementations later - if impl, ok := impl.(*redisCluster.RedisCluster); ok { - impl.ConnectionHandler = handler - } - } -} - func NewStorageHandler(name string, opts ...func(interfaces.Handler)) (interfaces.Handler, error) { var impl interfaces.Handler switch name { case REDIS_CLUSTER: impl = &redisCluster.RedisCluster{} case MDCB: - return nil, fmt.Errorf("mdcb storage handler is not implemented") + impl = mdcb.MdcbStorage{} + default: return nil, fmt.Errorf("unknown storage handler: %s", name) } From 47ba96f38fe21716efd4a30dcd8e5111f82978b6 Mon Sep 17 00:00:00 2001 From: Martin Buhr Date: Wed, 7 Aug 2024 14:28:52 +1200 Subject: [PATCH 3/4] moved all non-streaming and non-analytics to new constructor --- gateway/api.go | 38 ++++++- gateway/api_loader.go | 50 ++++++++-- gateway/coprocess_api.go | 15 ++- gateway/delete_api_cache.go | 14 ++- gateway/event_handler_webhooks.go | 15 ++- gateway/health_check.go | 14 ++- gateway/mw_external_oauth.go | 17 +++- gateway/mw_jwt.go | 14 ++- gateway/oauth_manager.go | 24 ++++- gateway/redis_logrus_hook.go | 20 +++- gateway/redis_signals.go | 1 + gateway/rpc_backup_handlers.go | 50 ++++++++-- gateway/server.go | 99 ++++++++++++++++--- rpc/synchronization_forcer.go | 17 +++- storage/{mdcb/options.go => 
mdcb_options.go} | 9 +- .../options.go => redis_options.go} | 23 +++-- 16 files changed, 357 insertions(+), 63 deletions(-) rename storage/{mdcb/options.go => mdcb_options.go} (77%) rename storage/{redis-cluster/options.go => redis_options.go} (60%) diff --git a/gateway/api.go b/gateway/api.go index f038f1d087d..5a1415f556f 100644 --- a/gateway/api.go +++ b/gateway/api.go @@ -48,7 +48,7 @@ import ( "github.com/getkin/kin-openapi/openapi3" "github.com/TykTechnologies/tyk/config" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/storage/util" "github.com/TykTechnologies/tyk/internal/otel" @@ -2236,13 +2236,29 @@ func (gw *Gateway) createOauthClient(w http.ResponseWriter, r *http.Request) { storageManager := gw.getGlobalMDCBStorageHandler(prefix, false) storageManager.Connect() + store, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix(prefix), + storage.WithHashKeys(false), + storage.WithConnectionHandler(gw.StorageConnectionHandler)) + + if err != nil { + log.WithFields(logrus.Fields{ + "prefix": "api", + "apiID": apiID, + "status": "fail", + "err": err, + }).Error("Failed to create OAuth client") + doJSONWrite(w, http.StatusInternalServerError, apiError("Failure in storing client data.")) + return + } + apiSpec.OAuthManager = &OAuthManager{ OsinServer: gw.TykOsinNewServer( &osin.ServerConfig{}, &RedisOsinStorageInterface{ storageManager, gw.GlobalSessionManager, - &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, + store, apiSpec.OrgID, gw, }), @@ -2623,12 +2639,28 @@ func (gw *Gateway) getOauthClientDetails(keyName, apiID string) (interface{}, in prefix := generateOAuthPrefix(apiSpec.APIID) storageManager := gw.getGlobalMDCBStorageHandler(prefix, false) storageManager.Connect() + + store, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix(prefix), + 
storage.WithHashKeys(false), + storage.WithConnectionHandler(gw.StorageConnectionHandler)) + + if err != nil { + log.WithFields(logrus.Fields{ + "prefix": "api", + "apiID": apiID, + "status": "fail", + "err": err, + }).Error("Failed to retrieve OAuth client details") + return apiError("OAuth Client ID not found"), http.StatusNotFound + } + apiSpec.OAuthManager = &OAuthManager{ OsinServer: gw.TykOsinNewServer(&osin.ServerConfig{}, &RedisOsinStorageInterface{ storageManager, gw.GlobalSessionManager, - &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, + store, apiSpec.OrgID, gw, }), diff --git a/gateway/api_loader.go b/gateway/api_loader.go index 098dc04de17..772ce362d04 100644 --- a/gateway/api_loader.go +++ b/gateway/api_loader.go @@ -17,7 +17,7 @@ import ( "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/rpc" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" "github.com/gorilla/mux" "github.com/justinas/alice" @@ -43,9 +43,38 @@ type ChainObject struct { func (gw *Gateway) prepareStorage() generalStores { var gs generalStores - gs.redisStore = &redisCluster.RedisCluster{KeyPrefix: "apikey-", HashKeys: gw.GetConfig().HashKeys, ConnectionHandler: gw.StorageConnectionHandler} - gs.redisOrgStore = &redisCluster.RedisCluster{KeyPrefix: "orgkey.", ConnectionHandler: gw.StorageConnectionHandler} - gs.healthStore = &redisCluster.RedisCluster{KeyPrefix: "apihealth.", ConnectionHandler: gw.StorageConnectionHandler} + var err error + gs.redisStore, err = storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix("apikey-"), + storage.WithHashKeys(gw.GetConfig().HashKeys), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + ) + + if err != nil { + log.WithError(err).Fatal("Failed to create redis storage handler") + } + + gs.redisOrgStore, err = storage.NewStorageHandler(storage.REDIS_CLUSTER, + 
storage.WithKeyPrefix("orgkey."), + storage.WithHashKeys(gw.GetConfig().HashKeys), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + ) + + if err != nil { + log.WithError(err).Fatal("Failed to create redis storage handler") + } + + gs.healthStore, err = storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix("apihealth."), + storage.WithHashKeys(gw.GetConfig().HashKeys), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + ) + + if err != nil { + log.WithError(err).Fatal("Failed to create redis storage handler") + } + + gs.rpcAuthStore = &RPCStorageHandler{KeyPrefix: "apikey-", HashKeys: gw.GetConfig().HashKeys, Gw: gw} gs.rpcAuthStore = &RPCStorageHandler{KeyPrefix: "apikey-", HashKeys: gw.GetConfig().HashKeys, Gw: gw} gs.rpcOrgStore = gw.getGlobalMDCBStorageHandler("orgkey.", false) @@ -288,7 +317,16 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int, } keyPrefix := "cache-" + spec.APIID - cacheStore := redisCluster.RedisCluster{KeyPrefix: keyPrefix, IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} + cacheStore, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix(keyPrefix), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.IsCache(true), + ) + + if err != nil { + logger.WithError(err).Error("Failed to create redis storage handler") + } + cacheStore.Connect() var chain http.Handler @@ -442,7 +480,7 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int, gw.mwAppendEnabled(&chainArray, &TransformMethod{BaseMiddleware: baseMid}) // Earliest we can respond with cache get 200 ok - gw.mwAppendEnabled(&chainArray, &RedisCacheMiddleware{BaseMiddleware: baseMid, store: &cacheStore}) + gw.mwAppendEnabled(&chainArray, &RedisCacheMiddleware{BaseMiddleware: baseMid, store: cacheStore}) gw.mwAppendEnabled(&chainArray, &VirtualEndpoint{BaseMiddleware: baseMid}) gw.mwAppendEnabled(&chainArray, &RequestSigning{BaseMiddleware: 
baseMid}) diff --git a/gateway/coprocess_api.go b/gateway/coprocess_api.go index fb2ef32bddc..59d8e83e51c 100644 --- a/gateway/coprocess_api.go +++ b/gateway/coprocess_api.go @@ -10,19 +10,30 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" + "github.com/TykTechnologies/tyk/interfaces" + "github.com/TykTechnologies/tyk/storage" redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" ) // CoProcessDefaultKeyPrefix is used as a key prefix for this CP. const CoProcessDefaultKeyPrefix = "coprocess-data:" -func getStorageForPython(ctx context.Context) redisCluster.RedisCluster { +func getStorageForPython(ctx context.Context) interfaces.Handler { rc := redisCluster.NewConnectionHandler(ctx) go rc.Connect(ctx, nil, &config.Config{}) rc.WaitConnect(ctx) - return redisCluster.RedisCluster{KeyPrefix: CoProcessDefaultKeyPrefix, ConnectionHandler: rc} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix(CoProcessDefaultKeyPrefix), + storage.WithConnectionHandler(rc)) + + if err != nil { + log.WithError(err).Error("could not create storage handler") + } + + return store } // TykStoreData is a CoProcess API function for storing data. 
diff --git a/gateway/delete_api_cache.go b/gateway/delete_api_cache.go index 0fd7a1a44be..0dd7408e095 100644 --- a/gateway/delete_api_cache.go +++ b/gateway/delete_api_cache.go @@ -3,10 +3,20 @@ package gateway import ( "fmt" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" ) func (gw *Gateway) invalidateAPICache(apiID string) bool { - store := redisCluster.RedisCluster{IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.IsCache(true), + ) + + if err != nil { + log.WithError(err).Error("could not create storage handler") + return false + } + return store.DeleteScanMatch(fmt.Sprintf("cache-%s*", apiID)) } diff --git a/gateway/event_handler_webhooks.go b/gateway/event_handler_webhooks.go index 5d45a6955a4..aafdbeafa07 100644 --- a/gateway/event_handler_webhooks.go +++ b/gateway/event_handler_webhooks.go @@ -20,7 +20,7 @@ import ( "github.com/TykTechnologies/tyk/header" "github.com/TykTechnologies/tyk/interfaces" "github.com/TykTechnologies/tyk/internal/event" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" ) type WebHookRequestMethod string @@ -70,7 +70,18 @@ func (w *WebHookHandler) Init(handlerConf interface{}) error { return ErrEventHandlerDisabled } - w.store = &redisCluster.RedisCluster{KeyPrefix: "webhook.cache.", ConnectionHandler: w.Gw.StorageConnectionHandler} + w.store, err = storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix("webhook.cache."), + storage.WithConnectionHandler(w.Gw.StorageConnectionHandler), + ) + if err != nil { + log.WithFields(logrus.Fields{ + "prefix": "webhooks", + }).Error("Failed to create storage handler: ", err) + return err + } + w.store.Connect() // Pre-load template on init diff --git a/gateway/health_check.go 
b/gateway/health_check.go index e0899fdd5be..56a298a1995 100644 --- a/gateway/health_check.go +++ b/gateway/health_check.go @@ -9,7 +9,7 @@ import ( "time" "github.com/TykTechnologies/tyk/rpc" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" "github.com/sirupsen/logrus" @@ -65,7 +65,15 @@ type SafeHealthCheck struct { func (gw *Gateway) gatherHealthChecks() { allInfos := SafeHealthCheck{info: make(map[string]apidef.HealthCheckItem, 3)} - redisStore := redisCluster.RedisCluster{KeyPrefix: "livenesscheck-", ConnectionHandler: gw.StorageConnectionHandler} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix("livenesscheck-"), + storage.WithConnectionHandler(gw.StorageConnectionHandler)) + + if err != nil { + mainLog.WithField("liveness-check", true).WithError(err).Error("Could not create storage handler") + return + } key := "tyk-liveness-probe" @@ -81,7 +89,7 @@ func (gw *Gateway) gatherHealthChecks() { Time: time.Now().Format(time.RFC3339), } - err := redisStore.SetRawKey(key, key, 10) + err := store.SetRawKey(key, key, 10) if err != nil { mainLog.WithField("liveness-check", true).WithError(err).Error("Redis health check failed") checkItem.Output = err.Error() diff --git a/gateway/mw_external_oauth.go b/gateway/mw_external_oauth.go index 8f7e7e2b2a1..479eb1612db 100644 --- a/gateway/mw_external_oauth.go +++ b/gateway/mw_external_oauth.go @@ -15,7 +15,8 @@ import ( "github.com/golang-jwt/jwt/v4" "github.com/TykTechnologies/tyk/apidef" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/interfaces" + "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" "github.com/TykTechnologies/tyk/internal/cache" @@ -292,11 +293,21 @@ func isExpired(claims jwt.MapClaims) bool { } func newIntrospectionCache(gw *Gateway) *introspectionCache { - return &introspectionCache{RedisCluster: 
redisCluster.RedisCluster{KeyPrefix: "introspection-", ConnectionHandler: gw.StorageConnectionHandler}} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix("introspection-"), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + ) + + if err != nil { + log.WithError(err).Error("could not create storage handler") + } + + return &introspectionCache{Handler: store} } type introspectionCache struct { - redisCluster.RedisCluster + interfaces.Handler } func (c *introspectionCache) GetRes(token string) (jwt.MapClaims, bool) { diff --git a/gateway/mw_jwt.go b/gateway/mw_jwt.go index 0b7d0228809..bd17d7e89bd 100644 --- a/gateway/mw_jwt.go +++ b/gateway/mw_jwt.go @@ -20,7 +20,7 @@ import ( "github.com/lonelycode/osin" "github.com/TykTechnologies/tyk/apidef" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" "github.com/TykTechnologies/tyk/internal/cache" @@ -579,12 +579,22 @@ func (k *JWTMiddleware) processCentralisedJWT(r *http.Request, token *jwt.Token) prefix := generateOAuthPrefix(k.Spec.APIID) storageManager := k.Gw.getGlobalMDCBStorageHandler(prefix, false) storageManager.Connect() + + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix(prefix), + storage.WithConnectionHandler(k.Gw.StorageConnectionHandler), + ) + if err != nil { + return errors.New("failed to create storage handler: " + err.Error()), http.StatusInternalServerError + } + k.Spec.OAuthManager = &OAuthManager{ OsinServer: k.Gw.TykOsinNewServer(&osin.ServerConfig{}, &RedisOsinStorageInterface{ storageManager, k.Gw.GlobalSessionManager, - &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: k.Gw.StorageConnectionHandler}, + store, k.Spec.OrgID, k.Gw, }), diff --git a/gateway/oauth_manager.go b/gateway/oauth_manager.go index a0cb7862e75..790271d0a14 100644 --- a/gateway/oauth_manager.go +++ 
b/gateway/oauth_manager.go @@ -22,6 +22,7 @@ import ( internalerrors "github.com/TykTechnologies/tyk/internal/errors" "github.com/TykTechnologies/tyk/internal/uuid" "github.com/TykTechnologies/tyk/request" + "github.com/TykTechnologies/tyk/storage" redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/storage/util" @@ -1192,9 +1193,24 @@ func (gw *Gateway) purgeLapsedOAuthTokens() error { return nil } - redisCluster := &redisCluster.RedisCluster{KeyPrefix: "", HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler} + st, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithHashKeys(false), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithKeyPrefix(""), + ) + + storage, ok := st.(*redisCluster.RedisCluster) + if !ok { + return errors.New("oauth token purge requires Redis storage") + } + + if err != nil { + log.WithError(err).Error("error creating storage handler") + return err + } - ok, err := redisCluster.Lock("oauth-purge-lock", time.Minute) + ok, err = storage.Lock("oauth-purge-lock", time.Minute) if err != nil { log.WithError(err).Error("error acquiring lock to purge oauth tokens") return err @@ -1205,7 +1221,7 @@ func (gw *Gateway) purgeLapsedOAuthTokens() error { return nil } - keys, err := redisCluster.ScanKeys(oAuthClientTokensKeyPattern) + keys, err := storage.ScanKeys(oAuthClientTokensKeyPattern) if err != nil { log.WithError(err).Error("error while scanning for tokens") @@ -1223,7 +1239,7 @@ func (gw *Gateway) purgeLapsedOAuthTokens() error { wg.Add(1) go func(k string) { defer wg.Done() - if err := redisCluster.RemoveSortedSetRange(k, "-inf", cleanupStartScore); err != nil { + if err := storage.RemoveSortedSetRange(k, "-inf", cleanupStartScore); err != nil { errs <- err } }(key) diff --git a/gateway/redis_logrus_hook.go b/gateway/redis_logrus_hook.go index 83d6ccc9acd..2ee044cd626 100644 --- a/gateway/redis_logrus_hook.go +++ 
b/gateway/redis_logrus_hook.go @@ -3,6 +3,7 @@ package gateway import ( "time" + "github.com/TykTechnologies/tyk/storage" redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/sirupsen/logrus" ) @@ -15,7 +16,24 @@ type redisChannelHook struct { func (gw *Gateway) newRedisHook() *redisChannelHook { hook := &redisChannelHook{} hook.formatter = new(logrus.JSONFormatter) - hook.notifier.store = &redisCluster.RedisCluster{KeyPrefix: "gateway-notifications:", ConnectionHandler: gw.StorageConnectionHandler} + + st, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithKeyPrefix("gateway-notifications:"), + ) + + if err != nil { + log.WithError(err).Error("could not create storage handler") + return nil + } + + storage, ok := st.(*redisCluster.RedisCluster) + if !ok { + log.Fatal("gateway channel hoook requires Redis storage") + } + + hook.notifier.store = storage hook.notifier.channel = "dashboard.ui.messages" return hook } diff --git a/gateway/redis_signals.go b/gateway/redis_signals.go index df650e1b4bc..bfdceee1a31 100644 --- a/gateway/redis_signals.go +++ b/gateway/redis_signals.go @@ -59,6 +59,7 @@ func (n *Notification) Sign() { } func (gw *Gateway) startPubSubLoop() { + // TODO: Interface this with the new storage handler cacheStore := redisCluster.RedisCluster{ConnectionHandler: gw.StorageConnectionHandler} cacheStore.Connect() diff --git a/gateway/rpc_backup_handlers.go b/gateway/rpc_backup_handlers.go index 8b4b70aefe9..00a7cc0ef3a 100644 --- a/gateway/rpc_backup_handlers.go +++ b/gateway/rpc_backup_handlers.go @@ -12,7 +12,7 @@ import ( "github.com/sirupsen/logrus" - redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" + "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" ) @@ -33,7 +33,16 @@ func (gw *Gateway) LoadDefinitionsFromRPCBackup() ([]*APISpec, error) { tagList := 
getTagListAsString(gw.GetConfig().DBAppConfOptions.Tags) checkKey := BackupApiKeyBase + tagList - store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithKeyPrefix(RPCKeyPrefix), + ) + + if err != nil { + return nil, errors.New("[RPC] --> RPC Backup recovery failed: " + err.Error()) + } + connected := store.Connect() log.Info("[RPC] --> Loading API definitions from backup") @@ -63,7 +72,16 @@ func (gw *Gateway) saveRPCDefinitionsBackup(list string) error { log.Info("--> Connecting to DB") - store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithKeyPrefix(RPCKeyPrefix), + ) + + if err != nil { + return errors.New("--> RPC Backup save failed: " + err.Error()) + } + connected := store.Connect() log.Info("--> Connected to DB") @@ -74,7 +92,7 @@ func (gw *Gateway) saveRPCDefinitionsBackup(list string) error { secret := rightPad2Len(gw.GetConfig().Secret, "=", 32) cryptoText := encrypt([]byte(secret), list) - err := store.SetKey(BackupApiKeyBase+tagList, cryptoText, -1) + err = store.SetKey(BackupApiKeyBase+tagList, cryptoText, -1) if err != nil { return errors.New("Failed to store node backup: " + err.Error()) } @@ -86,7 +104,15 @@ func (gw *Gateway) LoadPoliciesFromRPCBackup() (map[string]user.Policy, error) { tagList := getTagListAsString(gw.GetConfig().DBAppConfOptions.Tags) checkKey := BackupPolicyKeyBase + tagList - store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(gw.StorageConnectionHandler), + 
storage.WithKeyPrefix(RPCKeyPrefix), + ) + + if err != nil { + return nil, errors.New("[RPC] --> RPC Policy Backup recovery failed: " + err.Error()) + } connected := store.Connect() log.Info("[RPC] Loading Policies from backup") @@ -123,9 +149,17 @@ func (gw *Gateway) saveRPCPoliciesBackup(list string) error { log.Info("--> Connecting to DB") - store := redisCluster.RedisCluster{KeyPrefix: RPCKeyPrefix, ConnectionHandler: gw.StorageConnectionHandler} - connected := store.Connect() + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix(RPCKeyPrefix), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + ) + if err != nil { + return errors.New("--> RPC Backup save failed: " + err.Error()) + } + + connected := store.Connect() log.Info("--> Connected to DB") if !connected { @@ -134,7 +168,7 @@ func (gw *Gateway) saveRPCPoliciesBackup(list string) error { secret := rightPad2Len(gw.GetConfig().Secret, "=", 32) cryptoText := encrypt([]byte(secret), list) - err := store.SetKey(BackupPolicyKeyBase+tagList, cryptoText, -1) + err = store.SetKey(BackupPolicyKeyBase+tagList, cryptoText, -1) if err != nil { return errors.New("Failed to store node backup: " + err.Error()) } diff --git a/gateway/server.go b/gateway/server.go index e1b16889928..99fd55776ff 100644 --- a/gateway/server.go +++ b/gateway/server.go @@ -30,6 +30,7 @@ import ( "github.com/TykTechnologies/tyk/internal/httputil" "github.com/TykTechnologies/tyk/internal/otel" "github.com/TykTechnologies/tyk/internal/scheduler" + "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/test" logstashhook "github.com/bshuster-repo/logrus-logstash-hook" @@ -347,18 +348,44 @@ func (gw *Gateway) setupGlobals() { mainLog.Warn("Running Uptime checks in a management node.") } - healthCheckStore := redisCluster.RedisCluster{KeyPrefix: "host-checker:", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} - gw.InitHostCheckManager(gw.ctx, &healthCheckStore) + 
healthCheckStore, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix("host-checker:"), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.IsAnalytics(true), + ) + if err != nil { + mainLog.WithError(err).Error("Could not create storage handler") + } + + gw.InitHostCheckManager(gw.ctx, healthCheckStore) } gw.initHealthCheck(gw.ctx) - redisStore := redisCluster.RedisCluster{KeyPrefix: "apikey-", HashKeys: gwConfig.HashKeys, ConnectionHandler: gw.StorageConnectionHandler} - gw.GlobalSessionManager.Init(&redisStore) + redisStore, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix("apikey-"), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithHashKeys(gwConfig.HashKeys), + ) + if err != nil { + mainLog.WithError(err).Error("Could not create storage handler") + } + + gw.GlobalSessionManager.Init(redisStore) + + versionStore, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix("version-check-"), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + ) + if err != nil { + mainLog.WithError(err).Error("Could not create storage handler") + } - versionStore := redisCluster.RedisCluster{KeyPrefix: "version-check-", ConnectionHandler: gw.StorageConnectionHandler} versionStore.Connect() - err := versionStore.SetKey("gateway", VERSION, 0) + err = versionStore.SetKey("gateway", VERSION, 0) if err != nil { mainLog.WithError(err).Error("Could not set version in versionStore") @@ -370,6 +397,7 @@ func (gw *Gateway) setupGlobals() { gw.SetConfig(Conf) mainLog.Debug("Setting up analytics DB connection") + // TODO: Needs a new interface constructor analyticsStore := redisCluster.RedisCluster{KeyPrefix: "analytics-", IsAnalytics: true, ConnectionHandler: gw.StorageConnectionHandler} gw.Analytics.Store = &analyticsStore gw.Analytics.Init() @@ -428,7 +456,16 @@ func (gw *Gateway) setupGlobals() { certificateSecret = 
gw.GetConfig().Security.PrivateCertificateEncodingSecret } - storeCert := &redisCluster.RedisCluster{KeyPrefix: "cert-", HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler} + storeCert, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithKeyPrefix("cert-"), + storage.WithHashKeys(false), + ) + if err != nil { + mainLog.WithError(err).Error("Could not create storage handler") + } + gw.CertificateManager = certs.NewCertificateManager(storeCert, certificateSecret, log, !gw.GetConfig().Cloud) if gw.GetConfig().SlaveOptions.UseRPC { rpcStore := &RPCStorageHandler{ @@ -749,10 +786,22 @@ func (gw *Gateway) addOAuthHandlers(spec *APISpec, muxer *mux.Router) *OAuthMana prefix := generateOAuthPrefix(spec.APIID) storageManager := gw.getGlobalMDCBStorageHandler(prefix, false) storageManager.Connect() + + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix(prefix), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.WithHashKeys(false), + ) + + if err != nil { + mainLog.WithError(err).Error("Could not create storage handler") + } + osinStorage := &RedisOsinStorageInterface{ storageManager, gw.GlobalSessionManager, - &redisCluster.RedisCluster{KeyPrefix: prefix, HashKeys: false, ConnectionHandler: gw.StorageConnectionHandler}, + store, spec.OrgID, gw, } @@ -944,7 +993,16 @@ func (gw *Gateway) createResponseMiddlewareChain(spec *APISpec, responseFuncs [] } keyPrefix := "cache-" + spec.APIID - cacheStore := &redisCluster.RedisCluster{KeyPrefix: keyPrefix, IsCache: true, ConnectionHandler: gw.StorageConnectionHandler} + cacheStore, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithKeyPrefix(keyPrefix), + storage.WithConnectionHandler(gw.StorageConnectionHandler), + storage.IsCache(true), + ) + + if err != nil { + mainLog.WithError(err).Error("Could not create storage handler") + } cacheStore.Connect() // Add 
cache writer as the final step of the response middleware chain @@ -1567,7 +1625,15 @@ func (gw *Gateway) getHostDetails(file string) { } func (gw *Gateway) getGlobalMDCBStorageHandler(keyPrefix string, hashKeys bool) interfaces.Handler { - localStorage := &redisCluster.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys, ConnectionHandler: gw.StorageConnectionHandler} + localStorage, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix(keyPrefix), + storage.WithHashKeys(hashKeys), + storage.WithConnectionHandler(gw.StorageConnectionHandler)) + + if err != nil { + mainLog.Fatalf("Error creating storage handler: %v", err) + } + logger := logrus.New().WithFields(logrus.Fields{"prefix": "mdcb-storage-handler"}) if gw.GetConfig().SlaveOptions.UseRPC { @@ -1592,7 +1658,18 @@ func (gw *Gateway) getGlobalStorageHandler(keyPrefix string, hashKeys bool) inte Gw: gw, } } - return &redisCluster.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys, ConnectionHandler: gw.StorageConnectionHandler} + + handler, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + storage.WithKeyPrefix(keyPrefix), + storage.WithHashKeys(hashKeys), + storage.WithConnectionHandler(gw.StorageConnectionHandler)) + + if err != nil { + mainLog.Fatalf("Error creating storage handler: %v", err) + } + + return handler + } func Start() { diff --git a/rpc/synchronization_forcer.go b/rpc/synchronization_forcer.go index 8aa2889eb15..e14e5f450d3 100644 --- a/rpc/synchronization_forcer.go +++ b/rpc/synchronization_forcer.go @@ -4,12 +4,14 @@ import ( "errors" "github.com/TykTechnologies/tyk/apidef" + "github.com/TykTechnologies/tyk/interfaces" + "github.com/TykTechnologies/tyk/storage" redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" "github.com/TykTechnologies/tyk/storage/shared" ) type SyncronizerForcer struct { - store *redisCluster.RedisCluster + store interfaces.Handler getNodeDataFunc func() []byte } @@ -17,7 +19,18 @@ type SyncronizerForcer struct { 
func NewSyncForcer(controller *redisCluster.ConnectionHandler, getNodeDataFunc func() []byte) *SyncronizerForcer { sf := &SyncronizerForcer{} sf.getNodeDataFunc = getNodeDataFunc - sf.store = &redisCluster.RedisCluster{KeyPrefix: "synchronizer-group-", ConnectionHandler: controller} + store, err := storage.NewStorageHandler( + storage.REDIS_CLUSTER, + storage.WithConnectionHandler(controller), + storage.WithKeyPrefix("synchronizer-group-"), + ) + + if err != nil { + Log.Error("could not create storage handler") + return nil + } + + sf.store = store sf.store.Connect() return sf diff --git a/storage/mdcb/options.go b/storage/mdcb_options.go similarity index 77% rename from storage/mdcb/options.go rename to storage/mdcb_options.go index 3fd02542d78..dca17869aaa 100644 --- a/storage/mdcb/options.go +++ b/storage/mdcb_options.go @@ -1,7 +1,8 @@ -package mdcb +package storage import ( "github.com/TykTechnologies/tyk/interfaces" + "github.com/TykTechnologies/tyk/storage/mdcb" "github.com/sirupsen/logrus" ) @@ -9,7 +10,7 @@ import ( func WithLocalStorageHandler(handler interfaces.Handler) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*MdcbStorage); ok { + if impl, ok := impl.(*mdcb.MdcbStorage); ok { impl.Local = handler } } @@ -18,7 +19,7 @@ func WithLocalStorageHandler(handler interfaces.Handler) func(interfaces.Handler func WithRpcStorageHandler(handler interfaces.Handler) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*MdcbStorage); ok { + if impl, ok := impl.(*mdcb.MdcbStorage); ok { impl.Rpc = handler } } @@ -27,7 +28,7 @@ func WithRpcStorageHandler(handler interfaces.Handler) func(interfaces.Handler) func WithLogger(logger *logrus.Entry) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*MdcbStorage); 
ok { + if impl, ok := impl.(*mdcb.MdcbStorage); ok { impl.Logger = logger } } diff --git a/storage/redis-cluster/options.go b/storage/redis_options.go similarity index 60% rename from storage/redis-cluster/options.go rename to storage/redis_options.go index 7c70d006b01..1b1438b672d 100644 --- a/storage/redis-cluster/options.go +++ b/storage/redis_options.go @@ -1,12 +1,15 @@ -package redisCluster +package storage -import "github.com/TykTechnologies/tyk/interfaces" +import ( + "github.com/TykTechnologies/tyk/interfaces" + redisCluster "github.com/TykTechnologies/tyk/storage/redis-cluster" +) // Redis Cluster Options func WithKeyPrefix(prefix string) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*RedisCluster); ok { + if impl, ok := impl.(*redisCluster.RedisCluster); ok { impl.KeyPrefix = prefix } } @@ -15,16 +18,16 @@ func WithKeyPrefix(prefix string) func(interfaces.Handler) { func WithHashKeys(hashKeys bool) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*RedisCluster); ok { + if impl, ok := impl.(*redisCluster.RedisCluster); ok { impl.HashKeys = hashKeys } } } -func WithConnectionhandler(handler *ConnectionHandler) func(interfaces.Handler) { +func WithConnectionHandler(handler *redisCluster.ConnectionHandler) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*RedisCluster); ok { + if impl, ok := impl.(*redisCluster.RedisCluster); ok { impl.ConnectionHandler = handler } } @@ -33,7 +36,7 @@ func WithConnectionhandler(handler *ConnectionHandler) func(interfaces.Handler) func IsCache(cache bool) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*RedisCluster); ok { + if impl, ok := impl.(*redisCluster.RedisCluster); ok 
{ impl.IsCache = cache } } @@ -42,16 +45,16 @@ func IsCache(cache bool) func(interfaces.Handler) { func IsAnalytics(analytics bool) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*RedisCluster); ok { + if impl, ok := impl.(*redisCluster.RedisCluster); ok { impl.IsAnalytics = analytics } } } -func WithRedisController(controller *RedisController) func(interfaces.Handler) { +func WithRedisController(controller *redisCluster.RedisController) func(interfaces.Handler) { return func(impl interfaces.Handler) { // Type assertion for more iplementations later - if impl, ok := impl.(*RedisCluster); ok { + if impl, ok := impl.(*redisCluster.RedisCluster); ok { impl.RedisController = controller } } From b6ee537271a8b1b37dbdfcbe0002568bcdd001a5 Mon Sep 17 00:00:00 2001 From: Martin Buhr Date: Wed, 7 Aug 2024 14:38:24 +1200 Subject: [PATCH 4/4] made the storage type lookup dynamic so it can be configured later --- gateway/api.go | 4 ++-- gateway/api_loader.go | 8 ++++---- gateway/coprocess_api.go | 2 +- gateway/delete_api_cache.go | 2 +- gateway/event_handler_webhooks.go | 2 +- gateway/health_check.go | 2 +- gateway/mw_external_oauth.go | 2 +- gateway/mw_jwt.go | 2 +- gateway/oauth_manager.go | 2 +- gateway/redis_logrus_hook.go | 2 +- gateway/rpc_backup_handlers.go | 8 ++++---- gateway/server.go | 16 ++++++++-------- rpc/synchronization_forcer.go | 2 +- storage/storage.go | 10 ++++++++++ 14 files changed, 37 insertions(+), 27 deletions(-) diff --git a/gateway/api.go b/gateway/api.go index 5a1415f556f..e9ff30640e7 100644 --- a/gateway/api.go +++ b/gateway/api.go @@ -2236,7 +2236,7 @@ func (gw *Gateway) createOauthClient(w http.ResponseWriter, r *http.Request) { storageManager := gw.getGlobalMDCBStorageHandler(prefix, false) storageManager.Connect() - store, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + store, err := 
storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(prefix), storage.WithHashKeys(false), storage.WithConnectionHandler(gw.StorageConnectionHandler)) @@ -2640,7 +2640,7 @@ func (gw *Gateway) getOauthClientDetails(keyName, apiID string) (interface{}, in storageManager := gw.getGlobalMDCBStorageHandler(prefix, false) storageManager.Connect() - store, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + store, err := storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(prefix), storage.WithHashKeys(false), storage.WithConnectionHandler(gw.StorageConnectionHandler)) diff --git a/gateway/api_loader.go b/gateway/api_loader.go index 772ce362d04..2f07f393cf9 100644 --- a/gateway/api_loader.go +++ b/gateway/api_loader.go @@ -44,7 +44,7 @@ type ChainObject struct { func (gw *Gateway) prepareStorage() generalStores { var gs generalStores var err error - gs.redisStore, err = storage.NewStorageHandler(storage.REDIS_CLUSTER, + gs.redisStore, err = storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("apikey-"), storage.WithHashKeys(gw.GetConfig().HashKeys), storage.WithConnectionHandler(gw.StorageConnectionHandler), @@ -54,7 +54,7 @@ func (gw *Gateway) prepareStorage() generalStores { log.WithError(err).Fatal("Failed to create redis storage handler") } - gs.redisOrgStore, err = storage.NewStorageHandler(storage.REDIS_CLUSTER, + gs.redisOrgStore, err = storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("orgkey."), storage.WithHashKeys(gw.GetConfig().HashKeys), storage.WithConnectionHandler(gw.StorageConnectionHandler), @@ -64,7 +64,7 @@ func (gw *Gateway) prepareStorage() generalStores { log.WithError(err).Fatal("Failed to create redis storage handler") } - gs.healthStore, err = storage.NewStorageHandler(storage.REDIS_CLUSTER, + gs.healthStore, err = 
storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("apihealth."), storage.WithHashKeys(gw.GetConfig().HashKeys), storage.WithConnectionHandler(gw.StorageConnectionHandler), @@ -317,7 +317,7 @@ func (gw *Gateway) processSpec(spec *APISpec, apisByListen map[string]int, } keyPrefix := "cache-" + spec.APIID - cacheStore, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + cacheStore, err := storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(keyPrefix), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.IsCache(true), diff --git a/gateway/coprocess_api.go b/gateway/coprocess_api.go index 59d8e83e51c..75940c43be3 100644 --- a/gateway/coprocess_api.go +++ b/gateway/coprocess_api.go @@ -25,7 +25,7 @@ func getStorageForPython(ctx context.Context) interfaces.Handler { rc.WaitConnect(ctx) store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(CoProcessDefaultKeyPrefix), storage.WithConnectionHandler(rc)) diff --git a/gateway/delete_api_cache.go b/gateway/delete_api_cache.go index 0dd7408e095..fb7244b9574 100644 --- a/gateway/delete_api_cache.go +++ b/gateway/delete_api_cache.go @@ -8,7 +8,7 @@ import ( func (gw *Gateway) invalidateAPICache(apiID string) bool { store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.IsCache(true), ) diff --git a/gateway/event_handler_webhooks.go b/gateway/event_handler_webhooks.go index aafdbeafa07..4de24930ad0 100644 --- a/gateway/event_handler_webhooks.go +++ b/gateway/event_handler_webhooks.go @@ -71,7 +71,7 @@ func (w *WebHookHandler) Init(handlerConf interface{}) error { } w.store, err = storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), 
storage.WithKeyPrefix("webhook.cache."), storage.WithConnectionHandler(w.Gw.StorageConnectionHandler), ) diff --git a/gateway/health_check.go b/gateway/health_check.go index 56a298a1995..c8d05463861 100644 --- a/gateway/health_check.go +++ b/gateway/health_check.go @@ -66,7 +66,7 @@ func (gw *Gateway) gatherHealthChecks() { allInfos := SafeHealthCheck{info: make(map[string]apidef.HealthCheckItem, 3)} store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("livenesscheck-"), storage.WithConnectionHandler(gw.StorageConnectionHandler)) diff --git a/gateway/mw_external_oauth.go b/gateway/mw_external_oauth.go index 479eb1612db..7f225e9b194 100644 --- a/gateway/mw_external_oauth.go +++ b/gateway/mw_external_oauth.go @@ -294,7 +294,7 @@ func isExpired(claims jwt.MapClaims) bool { func newIntrospectionCache(gw *Gateway) *introspectionCache { store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("introspection-"), storage.WithConnectionHandler(gw.StorageConnectionHandler), ) diff --git a/gateway/mw_jwt.go b/gateway/mw_jwt.go index bd17d7e89bd..11e7cdb078d 100644 --- a/gateway/mw_jwt.go +++ b/gateway/mw_jwt.go @@ -581,7 +581,7 @@ func (k *JWTMiddleware) processCentralisedJWT(r *http.Request, token *jwt.Token) storageManager.Connect() store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(prefix), storage.WithConnectionHandler(k.Gw.StorageConnectionHandler), ) diff --git a/gateway/oauth_manager.go b/gateway/oauth_manager.go index 790271d0a14..881be4770d2 100644 --- a/gateway/oauth_manager.go +++ b/gateway/oauth_manager.go @@ -1194,7 +1194,7 @@ func (gw *Gateway) purgeLapsedOAuthTokens() error { } st, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), 
storage.WithHashKeys(false), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithKeyPrefix(""), diff --git a/gateway/redis_logrus_hook.go b/gateway/redis_logrus_hook.go index 2ee044cd626..62191f95b51 100644 --- a/gateway/redis_logrus_hook.go +++ b/gateway/redis_logrus_hook.go @@ -18,7 +18,7 @@ func (gw *Gateway) newRedisHook() *redisChannelHook { hook.formatter = new(logrus.JSONFormatter) st, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithKeyPrefix("gateway-notifications:"), ) diff --git a/gateway/rpc_backup_handlers.go b/gateway/rpc_backup_handlers.go index 00a7cc0ef3a..7565fdfd5bf 100644 --- a/gateway/rpc_backup_handlers.go +++ b/gateway/rpc_backup_handlers.go @@ -34,7 +34,7 @@ func (gw *Gateway) LoadDefinitionsFromRPCBackup() ([]*APISpec, error) { checkKey := BackupApiKeyBase + tagList store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithKeyPrefix(RPCKeyPrefix), ) @@ -73,7 +73,7 @@ func (gw *Gateway) saveRPCDefinitionsBackup(list string) error { log.Info("--> Connecting to DB") store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithKeyPrefix(RPCKeyPrefix), ) @@ -105,7 +105,7 @@ func (gw *Gateway) LoadPoliciesFromRPCBackup() (map[string]user.Policy, error) { checkKey := BackupPolicyKeyBase + tagList store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithKeyPrefix(RPCKeyPrefix), ) @@ -150,7 +150,7 @@ func (gw *Gateway) saveRPCPoliciesBackup(list string) error { log.Info("--> Connecting to DB") store, 
err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(RPCKeyPrefix), storage.WithConnectionHandler(gw.StorageConnectionHandler), ) diff --git a/gateway/server.go b/gateway/server.go index 99fd55776ff..e5f94641419 100644 --- a/gateway/server.go +++ b/gateway/server.go @@ -349,7 +349,7 @@ func (gw *Gateway) setupGlobals() { } healthCheckStore, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("host-checker:"), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.IsAnalytics(true), @@ -364,7 +364,7 @@ func (gw *Gateway) setupGlobals() { gw.initHealthCheck(gw.ctx) redisStore, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("apikey-"), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithHashKeys(gwConfig.HashKeys), @@ -376,7 +376,7 @@ func (gw *Gateway) setupGlobals() { gw.GlobalSessionManager.Init(redisStore) versionStore, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix("version-check-"), storage.WithConnectionHandler(gw.StorageConnectionHandler), ) @@ -457,7 +457,7 @@ func (gw *Gateway) setupGlobals() { } storeCert, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.WithKeyPrefix("cert-"), storage.WithHashKeys(false), @@ -788,7 +788,7 @@ func (gw *Gateway) addOAuthHandlers(spec *APISpec, muxer *mux.Router) *OAuthMana storageManager.Connect() store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(prefix), storage.WithConnectionHandler(gw.StorageConnectionHandler), 
storage.WithHashKeys(false), @@ -994,7 +994,7 @@ func (gw *Gateway) createResponseMiddlewareChain(spec *APISpec, responseFuncs [] keyPrefix := "cache-" + spec.APIID cacheStore, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(keyPrefix), storage.WithConnectionHandler(gw.StorageConnectionHandler), storage.IsCache(true), @@ -1625,7 +1625,7 @@ func (gw *Gateway) getHostDetails(file string) { } func (gw *Gateway) getGlobalMDCBStorageHandler(keyPrefix string, hashKeys bool) interfaces.Handler { - localStorage, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + localStorage, err := storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(keyPrefix), storage.WithHashKeys(hashKeys), storage.WithConnectionHandler(gw.StorageConnectionHandler)) @@ -1659,7 +1659,7 @@ func (gw *Gateway) getGlobalStorageHandler(keyPrefix string, hashKeys bool) inte } } - handler, err := storage.NewStorageHandler(storage.REDIS_CLUSTER, + handler, err := storage.NewStorageHandler(storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithKeyPrefix(keyPrefix), storage.WithHashKeys(hashKeys), storage.WithConnectionHandler(gw.StorageConnectionHandler)) diff --git a/rpc/synchronization_forcer.go b/rpc/synchronization_forcer.go index e14e5f450d3..66bd66238a8 100644 --- a/rpc/synchronization_forcer.go +++ b/rpc/synchronization_forcer.go @@ -20,7 +20,7 @@ func NewSyncForcer(controller *redisCluster.ConnectionHandler, getNodeDataFunc f sf := &SyncronizerForcer{} sf.getNodeDataFunc = getNodeDataFunc store, err := storage.NewStorageHandler( - storage.REDIS_CLUSTER, + storage.GetStorageForModule(storage.DEFAULT_MODULE), storage.WithConnectionHandler(controller), storage.WithKeyPrefix("synchronizer-group-"), ) diff --git a/storage/storage.go b/storage/storage.go index afbc997cc0f..b6f195e2698 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -40,3 +40,13 @@ 
func NewStorageHandler(name string, opts ...func(interfaces.Handler)) (interface return impl, nil } + +const ( + DEFAULT_MODULE = "default" +) + +// GetStorageForModule returns the storage type for the given module. +// Defaults to REDIS_CLUSTER for the initial implementation. +func GetStorageForModule(module string) string { + return REDIS_CLUSTER +}