From fa209f409d5c1f425e5109e4f190d4946ece61a9 Mon Sep 17 00:00:00 2001 From: dantengsky Date: Wed, 8 Jan 2025 22:51:25 +0800 Subject: [PATCH] chore: bring back function `set_cache_capacity` (#17196) * chore:bring back func `set_cache_capacity` * test cases * revert config file * tweak test case * fix: use usize as bytes capacity * tweak lock scope * rename vars/methods of cache mgr --- src/common/cache/src/cache.rs | 4 + src/common/cache/src/cache/lru.rs | 14 ++ .../interpreters/access/privilege_access.rs | 2 +- .../table_functions/table_function_factory.rs | 9 + .../storages/common/cache/src/manager.rs | 233 +++++++++++++----- .../cache/src/providers/memory_cache.rs | 33 ++- .../storages/fuse/src/table_functions/mod.rs | 3 + .../src/table_functions/set_cache_capacity.rs | 100 ++++++++ .../09_fuse_engine/09_0043_set_cache_cap.sql | 17 ++ ..._set_cache_capacity_privilege_check.result | 2 + ...0021_set_cache_capacity_privilege_check.sh | 15 ++ 11 files changed, 363 insertions(+), 69 deletions(-) create mode 100644 src/query/storages/fuse/src/table_functions/set_cache_capacity.rs create mode 100644 tests/sqllogictests/suites/base/09_fuse_engine/09_0043_set_cache_cap.sql create mode 100644 tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.result create mode 100755 tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.sh diff --git a/src/common/cache/src/cache.rs b/src/common/cache/src/cache.rs index 4328287536274..79a855ea4b353 100644 --- a/src/common/cache/src/cache.rs +++ b/src/common/cache/src/cache.rs @@ -72,6 +72,10 @@ pub trait Cache { fn items_capacity(&self) -> u64; + fn set_bytes_capacity(&mut self, capacity: usize); + + fn set_items_capacity(&mut self, capacity: usize); + /// Returns the bytes size of all the key-value pairs in the cache. fn bytes_size(&self) -> u64; diff --git a/src/common/cache/src/cache/lru.rs b/src/common/cache/src/cache/lru.rs index 52b8064f6bc97..d9b8c7c46865f 100644 --- a/src/common/cache/src/cache/lru.rs +++ b/src/common/cache/src/cache/lru.rs @@ -301,6 +301,20 @@ impl Cache for LruCache { self.max_items as u64 } + fn set_bytes_capacity(&mut self, max_bytes: usize) { + while self.bytes > max_bytes || self.map.len() > self.max_items { + self.pop_by_policy(); + } + self.max_bytes = max_bytes; + } + + fn set_items_capacity(&mut self, max_items: usize) { + while self.bytes > self.max_bytes || self.map.len() > max_items { + self.pop_by_policy(); + } + self.max_items = max_items; + } + /// Returns the bytes size of all the key-value pairs in the cache. 
fn bytes_size(&self) -> u64 { self.bytes as u64 diff --git a/src/query/service/src/interpreters/access/privilege_access.rs b/src/query/service/src/interpreters/access/privilege_access.rs index 3f0a0edc15dcf..4c5df38fb8068 100644 --- a/src/query/service/src/interpreters/access/privilege_access.rs +++ b/src/query/service/src/interpreters/access/privilege_access.rs @@ -60,7 +60,7 @@ enum ObjectId { } // table functions that need `Super` privilege -const SYSTEM_TABLE_FUNCTIONS: [&str; 1] = ["fuse_amend"]; +const SYSTEM_TABLE_FUNCTIONS: [&str; 2] = ["fuse_amend", "set_cache_capacity"]; impl PrivilegeAccess { pub fn create(ctx: Arc) -> Box { diff --git a/src/query/service/src/table_functions/table_function_factory.rs b/src/query/service/src/table_functions/table_function_factory.rs index 019d01f4e7e3f..3a24d03099102 100644 --- a/src/query/service/src/table_functions/table_function_factory.rs +++ b/src/query/service/src/table_functions/table_function_factory.rs @@ -27,6 +27,7 @@ use databend_common_storages_fuse::table_functions::FuseEncodingFunc; use databend_common_storages_fuse::table_functions::FuseStatisticsFunc; use databend_common_storages_fuse::table_functions::FuseTimeTravelSizeFunc; use databend_common_storages_fuse::table_functions::FuseVacuumTemporaryTable; +use databend_common_storages_fuse::table_functions::SetCacheCapacity; use databend_common_storages_fuse::table_functions::TableFunctionTemplate; use databend_common_storages_stream::stream_status_table_func::StreamStatusTable; use databend_storages_common_table_meta::table_id_ranges::SYS_TBL_FUC_ID_END; @@ -138,6 +139,14 @@ impl TableFunctionFactory { ), ); + creators.insert( + "set_cache_capacity".to_string(), + ( + next_id(), + Arc::new(TableFunctionTemplate::::create), + ), + ); + creators.insert( "fuse_segment".to_string(), ( diff --git a/src/query/storages/common/cache/src/manager.rs b/src/query/storages/common/cache/src/manager.rs index 3bf65686d19dd..d24f8851d8b4e 100644 --- a/src/query/storages/common/cache/src/manager.rs +++ b/src/query/storages/common/cache/src/manager.rs @@ -19,8 +19,10 @@ use databend_common_base::base::GlobalInstance; use databend_common_config::CacheConfig; use databend_common_config::CacheStorageTypeInnerConfig; use databend_common_config::DiskCacheKeyReloadPolicy; +use databend_common_exception::ErrorCode; use databend_common_exception::Result; use log::info; +use parking_lot::RwLock; use crate::caches::BlockMetaCache; use crate::caches::BloomIndexFilterCache; @@ -40,20 +42,43 @@ use crate::TableDataCacheBuilder; static DEFAULT_PARQUET_META_DATA_CACHE_ITEMS: usize = 3000; +struct CacheSlot { + cache: RwLock>, +} + +impl CacheSlot { + fn new(t: Option) -> CacheSlot { + CacheSlot { + cache: RwLock::new(t), + } + } + + fn set(&self, t: Option) { + let mut guard = self.cache.write(); + *guard = t + } +} + +impl CacheSlot { + fn get(&self) -> Option { + self.cache.read().clone() + } +} + /// Where all the caches reside pub struct CacheManager { - table_snapshot_cache: Option, - table_statistic_cache: Option, - compact_segment_info_cache: Option, - bloom_index_filter_cache: Option, - bloom_index_meta_cache: Option, - inverted_index_meta_cache: Option, - inverted_index_file_cache: Option, - prune_partitions_cache: Option, - parquet_meta_data_cache: Option, - table_data_cache: Option, - in_memory_table_data_cache: Option, - block_meta_cache: Option, + table_snapshot_cache: CacheSlot, + table_statistic_cache: CacheSlot, + compact_segment_info_cache: CacheSlot, + bloom_index_filter_cache: CacheSlot, + 
bloom_index_meta_cache: CacheSlot, + inverted_index_meta_cache: CacheSlot, + inverted_index_file_cache: CacheSlot, + prune_partitions_cache: CacheSlot, + parquet_meta_data_cache: CacheSlot, + table_data_cache: CacheSlot, + in_memory_table_data_cache: CacheSlot, + block_meta_cache: CacheSlot, } impl CacheManager { @@ -66,7 +91,7 @@ impl CacheManager { // setup table data cache let table_data_cache = { match config.data_cache_storage { - CacheStorageTypeInnerConfig::None => None, + CacheStorageTypeInnerConfig::None => CacheSlot::new(None), CacheStorageTypeInnerConfig::Disk => { let real_disk_cache_root = PathBuf::from(&config.disk_cache_config.path) .join(tenant_id.into()) @@ -110,48 +135,48 @@ impl CacheManager { // Cache of deserialized table data let in_memory_table_data_cache = - Self::new_named_bytes_cache(MEMORY_CACHE_TABLE_DATA, memory_cache_capacity); + Self::new_bytes_cache_slot(MEMORY_CACHE_TABLE_DATA, memory_cache_capacity); // setup in-memory table meta cache if !config.enable_table_meta_cache { GlobalInstance::set(Arc::new(Self { - table_snapshot_cache: None, - compact_segment_info_cache: None, - bloom_index_filter_cache: None, - bloom_index_meta_cache: None, - inverted_index_meta_cache: None, - inverted_index_file_cache: None, - prune_partitions_cache: None, - parquet_meta_data_cache: None, - table_statistic_cache: None, + table_snapshot_cache: CacheSlot::new(None), + compact_segment_info_cache: CacheSlot::new(None), + bloom_index_filter_cache: CacheSlot::new(None), + bloom_index_meta_cache: CacheSlot::new(None), + inverted_index_meta_cache: CacheSlot::new(None), + inverted_index_file_cache: CacheSlot::new(None), + prune_partitions_cache: CacheSlot::new(None), + parquet_meta_data_cache: CacheSlot::new(None), + table_statistic_cache: CacheSlot::new(None), table_data_cache, in_memory_table_data_cache, - block_meta_cache: None, + block_meta_cache: CacheSlot::new(None), })); } else { - let table_snapshot_cache = Self::new_named_items_cache( - config.table_meta_snapshot_count as usize, + let table_snapshot_cache = Self::new_items_cache_slot( MEMORY_CACHE_TABLE_SNAPSHOT, + config.table_meta_snapshot_count as usize, ); - let table_statistic_cache = Self::new_named_items_cache( - config.table_meta_statistic_count as usize, + let table_statistic_cache = Self::new_items_cache_slot( MEMORY_CACHE_TABLE_STATISTICS, + config.table_meta_statistic_count as usize, ); - let compact_segment_info_cache = Self::new_named_bytes_cache( + let compact_segment_info_cache = Self::new_bytes_cache_slot( MEMORY_CACHE_COMPACT_SEGMENT_INFO, config.table_meta_segment_bytes as usize, ); - let bloom_index_filter_cache = Self::new_named_bytes_cache( + let bloom_index_filter_cache = Self::new_bytes_cache_slot( MEMORY_CACHE_BLOOM_INDEX_FILTER, config.table_bloom_index_filter_size as usize, ); - let bloom_index_meta_cache = Self::new_named_items_cache( - config.table_bloom_index_meta_count as usize, + let bloom_index_meta_cache = Self::new_items_cache_slot( MEMORY_CACHE_BLOOM_INDEX_FILE_META_DATA, + config.table_bloom_index_meta_count as usize, ); - let inverted_index_meta_cache = Self::new_named_items_cache( - config.inverted_index_meta_count as usize, + let inverted_index_meta_cache = Self::new_items_cache_slot( MEMORY_CACHE_INVERTED_INDEX_FILE_META_DATA, + config.inverted_index_meta_count as usize, ); // setup in-memory inverted index filter cache @@ -162,23 +187,23 @@ impl CacheManager { } else { config.inverted_index_filter_size as usize }; - let inverted_index_file_cache = Self::new_named_bytes_cache( + let 
inverted_index_file_cache = Self::new_bytes_cache_slot( MEMORY_CACHE_INVERTED_INDEX_FILE, inverted_index_file_size, ); - let prune_partitions_cache = Self::new_named_items_cache( - config.table_prune_partitions_count as usize, + let prune_partitions_cache = Self::new_items_cache_slot( MEMORY_CACHE_PRUNE_PARTITIONS, + config.table_prune_partitions_count as usize, ); - let parquet_meta_data_cache = Self::new_named_items_cache( - DEFAULT_PARQUET_META_DATA_CACHE_ITEMS, + let parquet_meta_data_cache = Self::new_items_cache_slot( MEMORY_CACHE_PARQUET_META_DATA, + DEFAULT_PARQUET_META_DATA_CACHE_ITEMS, ); - let block_meta_cache = Self::new_named_items_cache( - config.block_meta_count as usize, + let block_meta_cache = Self::new_items_cache_slot( MEMORY_CACHE_BLOCK_META, + config.block_meta_count as usize, ); GlobalInstance::set(Arc::new(Self { @@ -205,56 +230,141 @@ impl CacheManager { } pub fn get_table_snapshot_cache(&self) -> Option { - self.table_snapshot_cache.clone() + self.table_snapshot_cache.get() + } + + pub fn set_cache_capacity(&self, name: &str, new_capacity: u64) -> Result<()> { + match name { + MEMORY_CACHE_TABLE_DATA => { + let cache = &self.in_memory_table_data_cache; + Self::set_bytes_capacity(cache, new_capacity, name); + } + MEMORY_CACHE_PARQUET_META_DATA => { + let cache = &self.parquet_meta_data_cache; + Self::set_items_capacity(cache, new_capacity, name) + } + MEMORY_CACHE_PRUNE_PARTITIONS => { + let cache = &self.prune_partitions_cache; + Self::set_items_capacity(cache, new_capacity, name) + } + MEMORY_CACHE_INVERTED_INDEX_FILE => { + let cache = &self.inverted_index_file_cache; + Self::set_bytes_capacity(cache, new_capacity, name); + } + MEMORY_CACHE_INVERTED_INDEX_FILE_META_DATA => { + let cache = &self.inverted_index_meta_cache; + Self::set_items_capacity(cache, new_capacity, name); + } + MEMORY_CACHE_BLOOM_INDEX_FILE_META_DATA => { + Self::set_items_capacity(&self.bloom_index_meta_cache, new_capacity, name); + } + MEMORY_CACHE_BLOOM_INDEX_FILTER => { + Self::set_bytes_capacity(&self.bloom_index_filter_cache, new_capacity, name); + } + MEMORY_CACHE_COMPACT_SEGMENT_INFO => { + Self::set_bytes_capacity(&self.compact_segment_info_cache, new_capacity, name); + } + MEMORY_CACHE_TABLE_STATISTICS => { + Self::set_items_capacity(&self.table_statistic_cache, new_capacity, name); + } + MEMORY_CACHE_TABLE_SNAPSHOT => { + Self::set_items_capacity(&self.table_snapshot_cache, new_capacity, name); + } + MEMORY_CACHE_BLOCK_META => { + Self::set_items_capacity(&self.block_meta_cache, new_capacity, name); + } + + crate::DISK_TABLE_DATA_CACHE_NAME => { + return Err(ErrorCode::BadArguments(format!( + "set capacity of cache {} is not allowed", + name + ))); + } + _ => return Err(ErrorCode::BadArguments(format!("cache {} not found", name))), + } + Ok(()) + } + + fn set_bytes_capacity>>( + cache: &CacheSlot>, + new_capacity: u64, + name: impl Into, + ) { + if let Some(v) = cache.get() { + v.set_bytes_capacity(new_capacity as usize); + } else { + let new_cache = Self::new_bytes_cache(name, new_capacity as usize); + cache.set(new_cache) + } + } + + fn set_items_capacity>>( + cache: &CacheSlot>, + new_capacity: u64, + name: impl Into, + ) { + if let Some(v) = cache.get() { + v.set_items_capacity(new_capacity as usize); + } else { + let new_cache = Self::new_items_cache(name, new_capacity as usize); + cache.set(new_cache) + } } pub fn get_block_meta_cache(&self) -> Option { - self.block_meta_cache.clone() + self.block_meta_cache.get() } pub fn get_table_snapshot_statistics_cache(&self) -> Option 
{ - self.table_statistic_cache.clone() + self.table_statistic_cache.get() } pub fn get_table_segment_cache(&self) -> Option { - self.compact_segment_info_cache.clone() + self.compact_segment_info_cache.get() } pub fn get_bloom_index_filter_cache(&self) -> Option { - self.bloom_index_filter_cache.clone() + self.bloom_index_filter_cache.get() } pub fn get_bloom_index_meta_cache(&self) -> Option { - self.bloom_index_meta_cache.clone() + self.bloom_index_meta_cache.get() } pub fn get_inverted_index_meta_cache(&self) -> Option { - self.inverted_index_meta_cache.clone() + self.inverted_index_meta_cache.get() } pub fn get_inverted_index_file_cache(&self) -> Option { - self.inverted_index_file_cache.clone() + self.inverted_index_file_cache.get() } pub fn get_prune_partitions_cache(&self) -> Option { - self.prune_partitions_cache.clone() + self.prune_partitions_cache.get() } pub fn get_parquet_meta_data_cache(&self) -> Option { - self.parquet_meta_data_cache.clone() + self.parquet_meta_data_cache.get() } pub fn get_table_data_cache(&self) -> Option { - self.table_data_cache.clone() + self.table_data_cache.get() } pub fn get_table_data_array_cache(&self) -> Option { - self.in_memory_table_data_cache.clone() + self.in_memory_table_data_cache.get() } - pub fn new_named_items_cache>>( + fn new_items_cache_slot>>( + name: impl Into, capacity: usize, + ) -> CacheSlot> { + CacheSlot::new(Self::new_items_cache(name, capacity)) + } + + fn new_items_cache>>( name: impl Into, + capacity: usize, ) -> Option> { match capacity { 0 => None, @@ -262,7 +372,14 @@ impl CacheManager { } } - fn new_named_bytes_cache>>( + fn new_bytes_cache_slot>>( + name: impl Into, + bytes_capacity: usize, + ) -> CacheSlot> { + CacheSlot::new(Self::new_bytes_cache(name, bytes_capacity)) + } + + fn new_bytes_cache>>( name: impl Into, bytes_capacity: usize, ) -> Option> { @@ -281,7 +398,7 @@ impl CacheManager { disk_cache_bytes_size: usize, disk_cache_key_reload_policy: DiskCacheKeyReloadPolicy, sync_data: bool, - ) -> Result> { + ) -> Result> { if disk_cache_bytes_size > 0 { let cache_holder = TableDataCacheBuilder::new_table_data_disk_cache( path, @@ -290,9 +407,9 @@ impl CacheManager { disk_cache_key_reload_policy, sync_data, )?; - Ok(Some(cache_holder)) + Ok(CacheSlot::new(Some(cache_holder))) } else { - Ok(None) + Ok(CacheSlot::new(None)) } } } diff --git a/src/query/storages/common/cache/src/providers/memory_cache.rs b/src/query/storages/common/cache/src/providers/memory_cache.rs index c0856a34c690d..cb26338d55c1c 100644 --- a/src/query/storages/common/cache/src/providers/memory_cache.rs +++ b/src/query/storages/common/cache/src/providers/memory_cache.rs @@ -27,6 +27,18 @@ pub struct InMemoryLruCache>> { inner: Arc>>>, } +impl>> InMemoryLruCache { + pub fn set_bytes_capacity(&self, capacity: usize) { + let mut cache = self.inner.write(); + cache.set_bytes_capacity(capacity); + } + + pub fn set_items_capacity(&self, capacity: usize) { + let mut cache = self.inner.write(); + cache.set_items_capacity(capacity); + } +} + impl>> Clone for InMemoryLruCache { fn clone(&self) -> Self { Self { @@ -81,17 +93,18 @@ mod impls { fn get>(&self, k: Q) -> Option> { metrics_inc_cache_access_count(1, self.name()); - let mut guard = self.inner.write(); - match guard.get(k.as_ref()) { - None => { - metrics_inc_cache_miss_count(1, &self.name); - None - } - Some(cached_value) => { - metrics_inc_cache_hit_count(1, &self.name); - Some(cached_value.get_inner()) - } + let v = { + let mut guard = self.inner.write(); + guard + .get(k.as_ref()) + 
.map(|cache_value: &CacheValue| cache_value.get_inner()) + }; + if v.is_none() { + metrics_inc_cache_miss_count(1, &self.name); + } else { + metrics_inc_cache_hit_count(1, &self.name); } + v } fn get_sized>(&self, k: Q, len: u64) -> Option> { diff --git a/src/query/storages/fuse/src/table_functions/mod.rs b/src/query/storages/fuse/src/table_functions/mod.rs index 2f68941a86630..b4bf4e61872d3 100644 --- a/src/query/storages/fuse/src/table_functions/mod.rs +++ b/src/query/storages/fuse/src/table_functions/mod.rs @@ -26,6 +26,8 @@ mod fuse_time_travel_size; mod fuse_vacuum_temporary_table; mod table_args; +mod set_cache_capacity; + pub use clustering_information::ClusteringInformationFunc; pub use clustering_statistics::ClusteringStatisticsFunc; use databend_common_catalog::table_args::TableArgs; @@ -43,4 +45,5 @@ pub use fuse_statistic::FuseStatisticsFunc; pub use fuse_time_travel_size::FuseTimeTravelSize; pub use fuse_time_travel_size::FuseTimeTravelSizeFunc; pub use fuse_vacuum_temporary_table::FuseVacuumTemporaryTable; +pub use set_cache_capacity::SetCacheCapacity; pub use table_args::*; diff --git a/src/query/storages/fuse/src/table_functions/set_cache_capacity.rs b/src/query/storages/fuse/src/table_functions/set_cache_capacity.rs new file mode 100644 index 0000000000000..05e704c4e0455 --- /dev/null +++ b/src/query/storages/fuse/src/table_functions/set_cache_capacity.rs @@ -0,0 +1,100 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+use std::sync::Arc;
+
+use databend_common_catalog::plan::DataSourcePlan;
+use databend_common_catalog::table_context::TableContext;
+use databend_common_exception::Result;
+use databend_common_expression::types::StringType;
+use databend_common_expression::DataBlock;
+use databend_common_expression::FromData;
+use databend_common_expression::TableDataType;
+use databend_common_expression::TableField;
+use databend_common_expression::TableSchemaRef;
+use databend_common_expression::TableSchemaRefExt;
+use databend_storages_common_cache::CacheManager;
+
+use crate::table_functions::string_literal;
+use crate::table_functions::string_value;
+use crate::table_functions::SimpleTableFunc;
+use crate::table_functions::TableArgs;
+
+#[derive(Clone)]
+pub struct SetCapacity {
+    cache_name: String,
+    capacity: u64,
+}
+
+impl From<&SetCapacity> for TableArgs {
+    fn from(value: &SetCapacity) -> Self {
+        TableArgs::new_positioned(vec![
+            string_literal(&value.cache_name),
+            string_literal(&value.capacity.to_string()),
+        ])
+    }
+}
+
+pub struct SetCacheCapacity {
+    operation: SetCapacity,
+}
+#[async_trait::async_trait]
+impl SimpleTableFunc for SetCacheCapacity {
+    fn table_args(&self) -> Option<TableArgs> {
+        Some((&self.operation).into())
+    }
+
+    fn schema(&self) -> TableSchemaRef {
+        TableSchemaRefExt::create(vec![
+            TableField::new("node", TableDataType::String),
+            TableField::new("result", TableDataType::String),
+        ])
+    }
+
+    fn is_local_func(&self) -> bool {
+        // cache operation needs to be broadcast to all nodes
+        false
+    }
+
+    async fn apply(
+        &self,
+        ctx: &Arc<dyn TableContext>,
+        _plan: &DataSourcePlan,
+    ) -> Result<Option<DataBlock>> {
+        let cache_mgr = CacheManager::instance();
+        let op = &self.operation;
+        cache_mgr.set_cache_capacity(&op.cache_name, op.capacity)?;
+
+        let node = vec![ctx.get_cluster().local_id.clone()];
+        let res = vec!["Ok".to_owned()];
+
+        Ok(Some(DataBlock::new_from_columns(vec![
+            StringType::from_data(node),
+            StringType::from_data(res),
+        ])))
+    }
+
+    fn create(_func_name: &str, table_args: TableArgs) -> Result<Self>
+    where Self: Sized {
+        let args = table_args.expect_all_positioned("", Some(2))?;
+        let cache_name = string_value(&args[0])?;
+        let capacity = string_value(&args[1])?.parse::<u64>()?;
+
+        let operation = SetCapacity {
+            cache_name,
+            capacity,
+        };
+        Ok(Self { operation })
+    }
+}
diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0043_set_cache_cap.sql b/tests/sqllogictests/suites/base/09_fuse_engine/09_0043_set_cache_cap.sql
new file mode 100644
index 0000000000000..1b75c19a4703d
--- /dev/null
+++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0043_set_cache_cap.sql
@@ -0,0 +1,17 @@
+statement ok
+create or replace database db_09_0041;
+
+statement ok
+use db_09_0041;
+
+# By default, memory_cache_block_meta is disabled,
+# let's enable it by setting a non-zero capacity
+statement ok
+call system$set_cache_capacity('memory_cache_block_meta', 1000);
+
+# check cache "memory_cache_block_meta" exists
+
+query II
+select count()>=1 from system.caches where name = 'memory_cache_block_meta' and capacity = 1000;
+----
+1
diff --git a/tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.result b/tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.result
new file mode 100644
index 0000000000000..516d019d5aec0
--- /dev/null
+++ b/tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.result
@@ -0,0 +1,2 @@
+Error: APIError: QueryFailed: [1063]Permission denied: privilege [Super] is required to invoke table function [set_cache_capacity] for user 'test'@'%' with roles [public]
+Error: APIError: QueryFailed: [1063]Permission denied: privilege [Super] is required to invoke table function [set_cache_capacity] for user 'test'@'%' with roles [public]
diff --git a/tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.sh b/tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.sh
new file mode 100755
index 0000000000000..37147dd4d519d
--- /dev/null
+++ b/tests/suites/0_stateless/20+_others/20_0021_set_cache_capacity_privilege_check.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. "$CURDIR"/../../../shell_env.sh
+
+echo "create user if not exists test identified by 'test'"|$BENDSQL_CLIENT_CONNECT
+
+export TEST_NON_PRIVILEGED_USER_CONNECT="bendsql --user=test --password=test --host=${QUERY_MYSQL_HANDLER_HOST} --port ${QUERY_HTTP_HANDLER_PORT}"
+
+echo "call system\$set_cache_capacity('memory_cache_block_meta', 100)" | $TEST_NON_PRIVILEGED_USER_CONNECT
+
+echo "select * from set_cache_capacity('memory_cache_block_meta', '100')" | $TEST_NON_PRIVILEGED_USER_CONNECT
+
+# CI checks the exit status ($?) of this script and marks it [FAIL] if it is non-zero.
+exit 0
\ No newline at end of file
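
A minimal usage sketch, drawn from the tests added in this patch (the cache name and capacity below are only examples; both forms require the Super privilege, and the effect can be verified through system.caches):

-- enable the block meta cache by giving it a non-zero capacity
call system$set_cache_capacity('memory_cache_block_meta', 1000);

-- equivalent table-function form
select * from set_cache_capacity('memory_cache_block_meta', '1000');

-- confirm the new capacity took effect
select name, capacity from system.caches where name = 'memory_cache_block_meta';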