Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion DEPRECATED.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ A logged warning is expected for each deprecated item that is in deprecation win
[fault.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/fault/v2/fault.proto))
has been deprecated. It was never used and setting it has no effect. It will be removed in the
following release.
* Use of `cluster`, found in [redis-proxy.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto) is deprecated. Set a `PrefixRoutes.catch_all_cluster` instead.

## Version 1.9.0 (Dec 20, 2018)

Expand Down
63 changes: 2 additions & 61 deletions api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,7 @@ message RedisProxy {
// Name of cluster from cluster manager. See the :ref:`configuration section
// <arch_overview_redis_configuration>` of the architecture overview for recommendations on
// configuring the backing cluster.
//
// .. attention::
//
// This field is deprecated. Use a :ref:`catch-all
// cluster<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_cluster>`
// instead.
string cluster = 2 [deprecated = true];
string cluster = 2 [(validate.rules).string.min_bytes = 1];

// Redis connection pool settings.
message ConnPoolSettings {
Expand All @@ -54,63 +48,10 @@ message RedisProxy {
bool enable_hashtagging = 2;
}

// Network settings for the connection pool to the upstream clusters.
// Network settings for the connection pool to the upstream cluster.
ConnPoolSettings settings = 3 [(validate.rules).message.required = true];

// Indicates that latency stat should be computed in microseconds. By default it is computed in
// milliseconds.
bool latency_in_micros = 4;

message PrefixRoutes {
message Route {
// String prefix that must match the beginning of the keys. Envoy will always favor the
// longest match.
string prefix = 1 [(validate.rules).string.min_bytes = 1];

// Indicates if the prefix needs to be removed from the key when forwarded.
bool remove_prefix = 2;

// Upstream cluster to forward the command to.
string cluster = 3 [(validate.rules).string.min_bytes = 1];
}

// List of prefix routes.
repeated Route routes = 1 [(gogoproto.nullable) = false];

// Indicates that prefix matching should be case insensitive.
bool case_insensitive = 2;

// Optional catch-all route to forward commands that don't match any of the routes. The
// catch-all route becomes required when no routes are specified.
string catch_all_cluster = 3;
}

// List of **unique** prefixes used to separate keys from different workloads to different
// clusters. Envoy will always favor the longest match first in case of overlap. A catch-all
// cluster can be used to forward commands when there is no match. Time complexity of the
// lookups is O(min(longest key prefix, key length)).
//
// Example:
//
// .. code-block:: yaml
//
// prefix_routes:
// routes:
// - prefix: "ab"
// cluster: "cluster_a"
// - prefix: "abc"
// cluster: "cluster_b"
//
// When using the above routes, the following prefixes would be sent to:
//
// * 'get abc:users' would retrieve the key 'abc:users' from cluster_b.
// * 'get ab:users' would retrieve the key 'ab:users' from cluster_a.
// * 'get z:users' would return a NoUpstreamHost error. A :ref:`catch-all
// cluster<envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.catch_all_cluster>`
// would have retrieved the key from that cluster instead.
//
// See the :ref:`configuration section
// <arch_overview_redis_configuration>` of the architecture overview for recommendations on
// configuring the backing clusters.
PrefixRoutes prefix_routes = 5 [(gogoproto.nullable) = false];
}
5 changes: 1 addition & 4 deletions docs/root/intro/arch_overview/redis.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,7 @@ In this mode, the goals of Envoy are to maintain availability and partition tole
over consistency. This is the key point when comparing Envoy to `Redis Cluster
<https://redis.io/topics/cluster-spec>`_. Envoy is designed as a best-effort cache,
meaning that it will not try to reconcile inconsistent data or keep a globally consistent
view of cluster membership. It also supports routing commands from different workloads to
different upstream clusters based on their access patterns, eviction, or isolation
requirements.
view of cluster membership.

The Redis project offers a thorough reference on partitioning as it relates to Redis. See
"`Partitioning: how to split data among multiple Redis instances
Expand All @@ -24,7 +22,6 @@ The Redis project offers a thorough reference on partitioning as it relates to R
* Detailed command statistics.
* Active and passive healthchecking.
* Hash tagging.
* Prefix routing.

**Planned future enhancements**:

Expand Down
1 change: 0 additions & 1 deletion docs/root/intro/version_history.rst
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,6 @@ Version history
* ratelimit: removed deprecated rate limit configuration from bootstrap.
* redis: added :ref:`hashtagging <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.enable_hashtagging>` to guarantee a given key's upstream.
* redis: added :ref:`latency stats <config_network_filters_redis_proxy_per_command_stats>` for commands.
* redis: added :ref:`prefix routing <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.prefix_routes>` to enable routing commands based on their key's prefix to different upstreams.
* redis: added :ref:`success and error stats <config_network_filters_redis_proxy_per_command_stats>` for commands.
* redis: migrate hash function for host selection to `MurmurHash2 <https://sites.google.com/site/murmurhash>`_ from std::hash. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS.
* redis: added :ref:`latency_in_micros <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.latency_in_micros>` to specify the redis commands stats time unit in microseconds.
Expand Down
34 changes: 1 addition & 33 deletions source/common/common/utility.h
Original file line number Diff line number Diff line change
Expand Up @@ -568,11 +568,8 @@ template <class Value> struct TrieLookupTable {
* Adds an entry to the Trie at the given Key.
* @param key the key used to add the entry.
* @param value the value to be associated with the key.
* @param overwrite_existing will overwrite the value when the value for a given key already
* exists.
* @return false when a value already exists for the given key.
*/
bool add(const char* key, Value value, bool overwrite_existing = true) {
void add(const char* key, Value value) {
TrieEntry<Value>* current = &root_;
while (uint8_t c = *key) {
if (!current->entries_[c]) {
Expand All @@ -581,11 +578,7 @@ template <class Value> struct TrieLookupTable {
current = current->entries_[c].get();
key++;
}
if (current->value_ && !overwrite_existing) {
return false;
}
current->value_ = value;
return true;
}

/**
Expand All @@ -606,31 +599,6 @@ template <class Value> struct TrieLookupTable {
return current->value_;
}

/**
* Finds the entry associated with the longest prefix. Complexity is O(min(longest key prefix, key
* length))
* @param key the key used to find.
* @return the value matching the longest prefix based on the key.
*/
Value findLongestPrefix(const char* key) const {
const TrieEntry<Value>* current = &root_;
const TrieEntry<Value>* result = nullptr;
while (uint8_t c = *key) {
if (current->value_) {
result = current;
}

// https://github.com/facebook/mcrouter/blob/master/mcrouter/lib/fbi/cpp/Trie-inl.h#L126-L143
current = current->entries_[c].get();
if (current == nullptr) {
return result ? result->value_ : nullptr;
}

key++;
}
return current ? current->value_ : result->value_;
}

TrieEntry<Value> root_;
};

Expand Down
29 changes: 3 additions & 26 deletions source/extensions/filters/network/redis_proxy/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -30,22 +30,13 @@ envoy_cc_library(
],
)

envoy_cc_library(
name = "router_interface",
hdrs = ["router.h"],
deps = [
":conn_pool_interface",
"@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc",
],
)

envoy_cc_library(
name = "command_splitter_lib",
srcs = ["command_splitter_impl.cc"],
hdrs = ["command_splitter_impl.h"],
deps = [
":command_splitter_interface",
":router_interface",
":conn_pool_interface",
"//include/envoy/stats:stats_macros",
"//include/envoy/stats:timespan",
"//source/common/common:assert_lib",
Expand All @@ -63,6 +54,7 @@ envoy_cc_library(
hdrs = ["conn_pool_impl.h"],
deps = [
":conn_pool_interface",
"//include/envoy/router:router_interface",
"//include/envoy/thread_local:thread_local_interface",
"//include/envoy/upstream:cluster_manager_interface",
"//source/common/buffer:buffer_lib",
Expand All @@ -81,7 +73,6 @@ envoy_cc_library(
hdrs = ["proxy_filter.h"],
deps = [
":command_splitter_interface",
":router_interface",
"//include/envoy/network:drain_decision_interface",
"//include/envoy/network:filter_interface",
"//include/envoy/upstream:cluster_manager_interface",
Expand All @@ -104,21 +95,7 @@ envoy_cc_library(
"//source/extensions/filters/network/common:factory_base_lib",
"//source/extensions/filters/network/common/redis:codec_lib",
"//source/extensions/filters/network/redis_proxy:command_splitter_lib",
"//source/extensions/filters/network/redis_proxy:proxy_filter_lib",
"//source/extensions/filters/network/redis_proxy:router_lib",
],
)

envoy_cc_library(
name = "router_lib",
srcs = ["router_impl.cc"],
hdrs = ["router_impl.h"],
deps = [
":router_interface",
"//include/envoy/thread_local:thread_local_interface",
"//include/envoy/upstream:cluster_manager_interface",
"//source/common/common:to_lower_table_lib",
"//source/extensions/filters/network/redis_proxy:conn_pool_lib",
"@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc",
"//source/extensions/filters/network/redis_proxy:proxy_filter_lib",
],
)
Original file line number Diff line number Diff line change
Expand Up @@ -59,15 +59,15 @@ void SingleServerRequest::cancel() {
handle_ = nullptr;
}

SplitRequestPtr SimpleRequest::create(Router& router,
SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool,
const Common::Redis::RespValue& incoming_request,
SplitCallbacks& callbacks, CommandStats& command_stats,
TimeSource& time_source, bool latency_in_micros) {
std::unique_ptr<SimpleRequest> request_ptr{
new SimpleRequest(callbacks, command_stats, time_source, latency_in_micros)};

request_ptr->handle_ =
router.makeRequest(incoming_request.asArray()[1].asString(), incoming_request, *request_ptr);
request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[1].asString(),
incoming_request, *request_ptr);
if (!request_ptr->handle_) {
request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
return nullptr;
Expand All @@ -76,7 +76,7 @@ SplitRequestPtr SimpleRequest::create(Router& router,
return std::move(request_ptr);
}

SplitRequestPtr EvalRequest::create(Router& router,
SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool,
const Common::Redis::RespValue& incoming_request,
SplitCallbacks& callbacks, CommandStats& command_stats,
TimeSource& time_source, bool latency_in_micros) {
Expand All @@ -91,8 +91,8 @@ SplitRequestPtr EvalRequest::create(Router& router,

std::unique_ptr<EvalRequest> request_ptr{
new EvalRequest(callbacks, command_stats, time_source, latency_in_micros)};
request_ptr->handle_ =
router.makeRequest(incoming_request.asArray()[3].asString(), incoming_request, *request_ptr);
request_ptr->handle_ = conn_pool.makeRequest(incoming_request.asArray()[3].asString(),
incoming_request, *request_ptr);
if (!request_ptr->handle_) {
command_stats.error_.inc();
request_ptr->callbacks_.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
Expand Down Expand Up @@ -123,7 +123,7 @@ void FragmentedRequest::onChildFailure(uint32_t index) {
onChildResponse(Utility::makeError(Response::get().UpstreamFailure), index);
}

SplitRequestPtr MGETRequest::create(Router& router,
SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool,
const Common::Redis::RespValue& incoming_request,
SplitCallbacks& callbacks, CommandStats& command_stats,
TimeSource& time_source, bool latency_in_micros) {
Expand Down Expand Up @@ -152,8 +152,8 @@ SplitRequestPtr MGETRequest::create(Router& router,

single_mget.asArray()[1].asString() = incoming_request.asArray()[i].asString();
ENVOY_LOG(debug, "redis: parallel get: '{}'", single_mget.toString());
pending_request.handle_ =
router.makeRequest(incoming_request.asArray()[i].asString(), single_mget, pending_request);
pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(),
single_mget, pending_request);
if (!pending_request.handle_) {
pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
}
Expand Down Expand Up @@ -195,7 +195,7 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t
}
}

SplitRequestPtr MSETRequest::create(Router& router,
SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool,
const Common::Redis::RespValue& incoming_request,
SplitCallbacks& callbacks, CommandStats& command_stats,
TimeSource& time_source, bool latency_in_micros) {
Expand Down Expand Up @@ -231,8 +231,8 @@ SplitRequestPtr MSETRequest::create(Router& router,
single_mset.asArray()[2].asString() = incoming_request.asArray()[i + 1].asString();

ENVOY_LOG(debug, "redis: parallel set: '{}'", single_mset.toString());
pending_request.handle_ =
router.makeRequest(incoming_request.asArray()[i].asString(), single_mset, pending_request);
pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(),
single_mset, pending_request);
if (!pending_request.handle_) {
pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
}
Expand Down Expand Up @@ -270,7 +270,7 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t
}
}

SplitRequestPtr SplitKeysSumResultRequest::create(Router& router,
SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool,
const Common::Redis::RespValue& incoming_request,
SplitCallbacks& callbacks,
CommandStats& command_stats,
Expand Down Expand Up @@ -299,8 +299,8 @@ SplitRequestPtr SplitKeysSumResultRequest::create(Router& router,
single_fragment.asArray()[1].asString() = incoming_request.asArray()[i].asString();
ENVOY_LOG(debug, "redis: parallel {}: '{}'", incoming_request.asArray()[0].asString(),
single_fragment.toString());
pending_request.handle_ = router.makeRequest(incoming_request.asArray()[i].asString(),
single_fragment, pending_request);
pending_request.handle_ = conn_pool.makeRequest(incoming_request.asArray()[i].asString(),
single_fragment, pending_request);
if (!pending_request.handle_) {
pending_request.onResponse(Utility::makeError(Response::get().NoUpstreamHost));
}
Expand Down Expand Up @@ -337,11 +337,12 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va
}
}

InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix,
TimeSource& time_source, bool latency_in_micros)
: router_(std::move(router)), simple_command_handler_(*router_),
eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_),
split_keys_sum_result_handler_(*router_),
InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scope,
const std::string& stat_prefix, TimeSource& time_source,
bool latency_in_micros)
: conn_pool_(std::move(conn_pool)), simple_command_handler_(*conn_pool_),
eval_command_handler_(*conn_pool_), mget_handler_(*conn_pool_), mset_handler_(*conn_pool_),
split_keys_sum_result_handler_(*conn_pool_),
stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))},
latency_in_micros_(latency_in_micros), time_source_(time_source) {
for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) {
Expand Down
Loading