Skip to content

chore: remove obsolete configs #14414

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 0 additions & 70 deletions src/query/config/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1486,58 +1486,6 @@ pub struct QueryConfig {
#[clap(long, value_name = "VALUE", default_value = "90")]
pub(crate) data_retention_time_in_days_max: u64,

// ----- the following options/args are all deprecated ----
// ----- and turned into Option<T>, to help user migrate the configs ----
/// OBSOLETED: Table disk cache size (mb).
#[clap(long, value_name = "VALUE")]
pub table_disk_cache_mb_size: Option<u64>,

/// OBSOLETED: Table Meta Cached enabled
#[clap(long, value_name = "VALUE")]
pub table_meta_cache_enabled: Option<bool>,

/// OBSOLETED: Max number of cached table block meta
#[clap(long, value_name = "VALUE")]
pub table_cache_block_meta_count: Option<u64>,

/// OBSOLETED: Table memory cache size (mb)
#[clap(long, value_name = "VALUE")]
pub table_memory_cache_mb_size: Option<u64>,

/// OBSOLETED: Table disk cache folder root
#[clap(long, value_name = "VALUE")]
pub table_disk_cache_root: Option<String>,

/// OBSOLETED: Max number of cached table snapshot
#[clap(long, value_name = "VALUE")]
pub table_cache_snapshot_count: Option<u64>,

/// OBSOLETED: Max number of cached table snapshot statistics
#[clap(long, value_name = "VALUE")]
pub table_cache_statistic_count: Option<u64>,

/// OBSOLETED: Max number of cached table segment
#[clap(long, value_name = "VALUE")]
pub table_cache_segment_count: Option<u64>,

/// OBSOLETED: Max number of cached bloom index meta objects
#[clap(long, value_name = "VALUE")]
pub table_cache_bloom_index_meta_count: Option<u64>,

/// OBSOLETED:
/// Max number of cached bloom index filters, default value is 1024 * 1024 items.
/// One bloom index filter per column of data block being indexed will be generated if necessary.
///
/// For example, a table of 1024 columns, with 800 data blocks, a query that triggers a full
/// table filter on 2 columns, might populate 2 * 800 bloom index filter cache items (at most)
#[clap(long, value_name = "VALUE")]
pub table_cache_bloom_index_filter_count: Option<u64>,

/// OBSOLETED: (cache of raw bloom filter data is no longer supported)
/// Max bytes of cached bloom filter bytes.
#[clap(long, value_name = "VALUE")]
pub(crate) table_cache_bloom_index_data_bytes: Option<u64>,

/// Disable some system load(For example system.configs) for cloud security.
#[clap(long, value_name = "VALUE")]
pub disable_system_table_load: bool,
Expand Down Expand Up @@ -1728,18 +1676,6 @@ impl From<InnerQueryConfig> for QueryConfig {
internal_merge_on_read_mutation: false,
data_retention_time_in_days_max: 90,

// obsoleted config entries
table_disk_cache_mb_size: None,
table_meta_cache_enabled: None,
table_cache_block_meta_count: None,
table_memory_cache_mb_size: None,
table_disk_cache_root: None,
table_cache_snapshot_count: None,
table_cache_statistic_count: None,
table_cache_segment_count: None,
table_cache_bloom_index_meta_count: None,
table_cache_bloom_index_filter_count: None,
table_cache_bloom_index_data_bytes: None,
//
disable_system_table_load: inner.disable_system_table_load,
openai_api_chat_base_url: inner.openai_api_chat_base_url,
Expand Down Expand Up @@ -2679,11 +2615,6 @@ pub struct CacheConfig {
default_value = "0"
)]
pub table_data_deserialized_data_bytes: u64,

// ----- the following options/args are all deprecated ----
/// Max number of cached table segment
#[clap(long = "cache-table-meta-segment-count", value_name = "VALUE")]
pub table_meta_segment_count: Option<u64>,
}

impl Default for CacheConfig {
Expand Down Expand Up @@ -2832,7 +2763,6 @@ mod cache_config_converters {
.table_data_cache_population_queue_size,
disk_cache_config: value.disk_cache_config.into(),
table_data_deserialized_data_bytes: value.table_data_deserialized_data_bytes,
table_meta_segment_count: None,
}
}
}
Expand Down
139 changes: 8 additions & 131 deletions src/query/config/src/obsolete.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,145 +22,22 @@ use crate::Config;
// The following code should be removed from the release after the next release.
// Just give user errors without any detail explanation and migration suggestions.
impl Config {
pub const fn obsoleted_option_keys() -> &'static [&'static str; 11] {
&[
"table_disk_cache_mb_size",
"table_meta_cache_enabled",
"table_cache_block_meta_count",
"table_memory_cache_mb_size",
"table_disk_cache_root",
"table_cache_snapshot_count",
"table_cache_statistic_count",
"table_cache_segment_count",
"table_cache_bloom_index_meta_count",
"table_cache_bloom_index_filter_count",
"table_cache_bloom_index_data_bytes",
]
/// The set of configuration keys that have been removed and are now rejected.
///
/// Kept as a fixed-size array so callers can rely on the exact count at
/// compile time.
pub const fn obsoleted_option_keys() -> &'static [&'static str; 1] {
    // Elided lifetimes in a `const` item default to `'static`.
    const KEYS: &[&str; 1] = &["obsoleted-name"];
    KEYS
}

pub(crate) fn check_obsoleted(&self) -> Result<()> {
let check_results = vec![
// This is a demo.
Self::check(
&self.query.table_disk_cache_mb_size,
"table-disk-cache-mb-size",
"cache-disk-max-bytes",
&Some("obsoleted-name"),
"obsoleted-name",
"new-name",
r#"
[cache]
...
data_cache_storage = "disk"
...
[cache.disk]
max_bytes = [MAX_BYTES]
...
name-name = "value"
"#,
"CACHE_DISK_MAX_BYTES",
),
Self::check(
&self.query.table_meta_cache_enabled,
"table-meta-cache-enabled",
"cache-enable-table-meta-cache",
r#"
[cache]
enable_table_meta_cache=[true|false]
"#,
"CACHE_ENABLE_TABLE_META_CACHE",
),
Self::check(
&self.query.table_cache_block_meta_count,
"table-cache-block-meta-count",
"N/A",
"N/A",
"N/A",
),
Self::check(
&self.query.table_memory_cache_mb_size,
"table-memory-cache-mb-size",
"N/A",
"N/A",
"N/A",
),
Self::check(
&self.query.table_disk_cache_root,
"table-disk-cache-root",
"cache-disk-path",
r#"
[cache]
...
data_cache_storage = "disk"
...
[cache.disk]
max_bytes = [MAX_BYTES]
path = [PATH]
...
"#,
"CACHE_DISK_PATH",
),
Self::check(
&self.query.table_cache_snapshot_count,
"table-cache-snapshot-count",
"cache-table-meta-snapshot-count",
r#"
[cache]
table_meta_snapshot_count = [COUNT]
"#,
"CACHE_TABLE_META_SNAPSHOT_COUNT",
),
Self::check(
&self.query.table_cache_statistic_count,
"table-cache-statistic-count",
"cache-table-meta-statistic-count",
r#"
[cache]
table_meta_statistic_count = [COUNT]
"#,
"CACHE_TABLE_META_STATISTIC_COUNT",
),
Self::check(
&self.query.table_cache_segment_count,
"table-cache-segment-count",
"cache-table-meta-segment-count",
r#"
[cache]
table_meta_segment_count = [COUNT]
"#,
"CACHE_TABLE_META_SEGMENT_COUNT",
),
Self::check(
&self.query.table_cache_bloom_index_meta_count,
"table-cache-bloom-index-meta-count",
"cache-table-bloom-index-meta-count",
r#"
[cache]
table_bloom_index_meta_count = [COUNT]
"#,
"CACHE_TABLE_BLOOM_INDEX_META_COUNT",
),
Self::check(
&self.query.table_cache_bloom_index_filter_count,
"table-cache-bloom-index-filter-count",
"cache-table-bloom-index-filter-count",
r#"
[cache]
table_bloom_index_filter_count = [COUNT]
"#,
"CACHE_TABLE_BLOOM_INDEX_FILTER_COUNT",
),
Self::check(
&self.query.table_cache_bloom_index_data_bytes,
"table-cache-bloom-index-data-bytes",
"N/A",
"N/A",
"N/A",
),
Self::check(
&self.cache.table_meta_segment_count,
"cache-table-meta-segment-count",
"cache-table-meta-segment-bytes",
r#"
[cache]
table_meta_segment_bytes = [BYTES]
"#,
"CACHE_TABLE_META_SEGMENT_BYTES",
"NEW-NAME",
),
];

Expand Down
5 changes: 4 additions & 1 deletion tests/cloud_control_server/simple_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,14 +265,17 @@ def GetTaskDependents(self, request, context):
root = TASK_DB[root.after[0]]
l.insert(0, root)
return task_pb2.GetTaskDependentsResponse(task=l)

def EnableTaskDependents(self, request, context):
    """Mark the requested task as started.

    Looks up ``request.task_name`` in the in-memory TASK_DB; if the task is
    unknown this is a no-op. Always returns an EnableTaskDependentsResponse.
    """
    print("EnableTaskDependents", request)
    task_name = request.task_name
    if task_name not in TASK_DB:
        # Unknown task: nothing to enable, return an empty response.
        return task_pb2.EnableTaskDependentsResponse()
    # Fixed: the original had an unconditional `return` here, which made the
    # status update below unreachable — the task was never marked Started.
    task = TASK_DB[task_name]
    task.status = task_pb2.Task.Started
    return task_pb2.EnableTaskDependentsResponse()


def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
task_pb2_grpc.add_TaskServiceServicer_to_server(TaskService(), server)
Expand Down
Loading