From 67fdd9b69dca741001bb14765a4c2078ff86954a Mon Sep 17 00:00:00 2001 From: Matt Butrovich Date: Fri, 5 Dec 2025 14:39:09 -0500 Subject: [PATCH 1/3] Bump MSRV to 1.88, fix warnings and clippy errors. --- Cargo.toml | 5 +- crates/catalog/glue/src/catalog.rs | 46 +++++------ crates/iceberg/Cargo.toml | 3 + crates/iceberg/src/arrow/reader.rs | 68 ++++++++-------- .../src/arrow/record_batch_projector.rs | 35 ++++----- .../src/arrow/record_batch_transformer.rs | 4 +- crates/iceberg/src/arrow/value.rs | 26 +++---- crates/iceberg/src/catalog/mod.rs | 14 ++-- crates/iceberg/src/delete_vector.rs | 10 +-- .../src/expr/visitors/manifest_evaluator.rs | 26 +++---- .../src/expr/visitors/page_index_evaluator.rs | 16 ++-- .../expr/visitors/strict_metrics_evaluator.rs | 24 +++--- crates/iceberg/src/inspect/metadata_table.rs | 4 +- crates/iceberg/src/io/storage.rs | 4 +- crates/iceberg/src/io/storage_azdls.rs | 20 ++--- crates/iceberg/src/io/storage_gcs.rs | 26 +++---- crates/iceberg/src/io/storage_oss.rs | 2 +- crates/iceberg/src/io/storage_s3.rs | 24 +++--- crates/iceberg/src/spec/datatypes.rs | 3 +- crates/iceberg/src/spec/manifest/writer.rs | 8 +- .../iceberg/src/spec/schema/prune_columns.rs | 34 ++++---- crates/iceberg/src/spec/table_metadata.rs | 71 +++++++++-------- crates/iceberg/src/spec/transform.rs | 78 +++++++++---------- crates/iceberg/src/spec/values/tests.rs | 2 +- .../iceberg/src/spec/view_metadata_builder.rs | 8 +- crates/iceberg/src/transaction/mod.rs | 2 +- crates/iceberg/src/transaction/snapshot.rs | 14 ++-- .../src/writer/file_writer/rolling_writer.rs | 24 +++--- crates/iceberg/tests/file_io_gcs_test.rs | 6 +- .../src/physical_plan/repartition.rs | 11 +-- .../datafusion/src/physical_plan/sort.rs | 3 +- .../integrations/datafusion/src/table/mod.rs | 2 +- .../tests/integration_datafusion_test.rs | 15 ++-- rust-toolchain.toml | 2 +- 34 files changed, 316 insertions(+), 324 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0512155534..83a1f44a23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,7 +36,10 @@ version = "0.7.0" license = "Apache-2.0" repository = "https://github.com/apache/iceberg-rust" # Check the MSRV policy in README.md before changing this -rust-version = "1.87" +rust-version = "1.88" + +[workspace.lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(FALSE)'] } [workspace.dependencies] anyhow = "1.0.72" diff --git a/crates/catalog/glue/src/catalog.rs b/crates/catalog/glue/src/catalog.rs index dce287ed6e..37a7996f80 100644 --- a/crates/catalog/glue/src/catalog.rs +++ b/crates/catalog/glue/src/catalog.rs @@ -151,33 +151,33 @@ impl GlueCatalog { async fn new(config: GlueCatalogConfig) -> Result { let sdk_config = create_sdk_config(&config.props, config.uri.as_ref()).await; let mut file_io_props = config.props.clone(); - if !file_io_props.contains_key(S3_ACCESS_KEY_ID) { - if let Some(access_key_id) = file_io_props.get(AWS_ACCESS_KEY_ID) { - file_io_props.insert(S3_ACCESS_KEY_ID.to_string(), access_key_id.to_string()); - } + if !file_io_props.contains_key(S3_ACCESS_KEY_ID) + && let Some(access_key_id) = file_io_props.get(AWS_ACCESS_KEY_ID) + { + file_io_props.insert(S3_ACCESS_KEY_ID.to_string(), access_key_id.to_string()); } - if !file_io_props.contains_key(S3_SECRET_ACCESS_KEY) { - if let Some(secret_access_key) = file_io_props.get(AWS_SECRET_ACCESS_KEY) { - file_io_props.insert( - S3_SECRET_ACCESS_KEY.to_string(), - secret_access_key.to_string(), - ); - } + if !file_io_props.contains_key(S3_SECRET_ACCESS_KEY) + && let Some(secret_access_key) = 
file_io_props.get(AWS_SECRET_ACCESS_KEY) + { + file_io_props.insert( + S3_SECRET_ACCESS_KEY.to_string(), + secret_access_key.to_string(), + ); } - if !file_io_props.contains_key(S3_REGION) { - if let Some(region) = file_io_props.get(AWS_REGION_NAME) { - file_io_props.insert(S3_REGION.to_string(), region.to_string()); - } + if !file_io_props.contains_key(S3_REGION) + && let Some(region) = file_io_props.get(AWS_REGION_NAME) + { + file_io_props.insert(S3_REGION.to_string(), region.to_string()); } - if !file_io_props.contains_key(S3_SESSION_TOKEN) { - if let Some(session_token) = file_io_props.get(AWS_SESSION_TOKEN) { - file_io_props.insert(S3_SESSION_TOKEN.to_string(), session_token.to_string()); - } + if !file_io_props.contains_key(S3_SESSION_TOKEN) + && let Some(session_token) = file_io_props.get(AWS_SESSION_TOKEN) + { + file_io_props.insert(S3_SESSION_TOKEN.to_string(), session_token.to_string()); } - if !file_io_props.contains_key(S3_ENDPOINT) { - if let Some(aws_endpoint) = config.uri.as_ref() { - file_io_props.insert(S3_ENDPOINT.to_string(), aws_endpoint.to_string()); - } + if !file_io_props.contains_key(S3_ENDPOINT) + && let Some(aws_endpoint) = config.uri.as_ref() + { + file_io_props.insert(S3_ENDPOINT.to_string(), aws_endpoint.to_string()); } let client = aws_sdk_glue::Client::new(&sdk_config); diff --git a/crates/iceberg/Cargo.toml b/crates/iceberg/Cargo.toml index 895a5cf5e4..5c95f0b89f 100644 --- a/crates/iceberg/Cargo.toml +++ b/crates/iceberg/Cargo.toml @@ -28,6 +28,9 @@ keywords = ["iceberg"] license = { workspace = true } repository = { workspace = true } +[lints] +workspace = true + [features] default = ["storage-memory", "storage-fs", "storage-s3", "tokio"] storage-all = ["storage-memory", "storage-fs", "storage-s3", "storage-gcs"] diff --git a/crates/iceberg/src/arrow/reader.rs b/crates/iceberg/src/arrow/reader.rs index ab5a96f751..3a9b814b2b 100644 --- a/crates/iceberg/src/arrow/reader.rs +++ b/crates/iceberg/src/arrow/reader.rs @@ -485,10 +485,10 @@ impl ArrowReader { // we need to call next() to update the cache with the newly positioned value. 
delete_vector_iter.advance_to(next_row_group_base_idx); // Only update the cache if the cached value is stale (in the skipped range) - if let Some(cached_idx) = next_deleted_row_idx_opt { - if cached_idx < next_row_group_base_idx { - next_deleted_row_idx_opt = delete_vector_iter.next(); - } + if let Some(cached_idx) = next_deleted_row_idx_opt + && cached_idx < next_row_group_base_idx + { + next_deleted_row_idx_opt = delete_vector_iter.next(); } // still increment the current page base index but then skip to the next row group @@ -842,10 +842,10 @@ impl ArrowReader { }; // If all row groups were filtered out, return an empty RowSelection (select no rows) - if let Some(selected_row_groups) = selected_row_groups { - if selected_row_groups.is_empty() { - return Ok(RowSelection::from(Vec::new())); - } + if let Some(selected_row_groups) = selected_row_groups + && selected_row_groups.is_empty() + { + return Ok(RowSelection::from(Vec::new())); } let mut selected_row_groups_idx = 0; @@ -878,10 +878,10 @@ impl ArrowReader { results.push(selections_for_page); - if let Some(selected_row_groups) = selected_row_groups { - if selected_row_groups_idx == selected_row_groups.len() { - break; - } + if let Some(selected_row_groups) = selected_row_groups + && selected_row_groups_idx == selected_row_groups.len() + { + break; } } @@ -1012,13 +1012,13 @@ fn apply_name_mapping_to_arrow_schema( let mut metadata = field.metadata().clone(); - if let Some(mapped_field) = mapped_field_opt { - if let Some(field_id) = mapped_field.field_id() { - // Field found in mapping with a field_id → assign it - metadata.insert(PARQUET_FIELD_ID_META_KEY.to_string(), field_id.to_string()); - } - // If field_id is None, leave the field without an ID (will be filtered by projection) + if let Some(mapped_field) = mapped_field_opt + && let Some(field_id) = mapped_field.field_id() + { + // Field found in mapping with a field_id → assign it + metadata.insert(PARQUET_FIELD_ID_META_KEY.to_string(), field_id.to_string()); } + // If field_id is None, leave the field without an ID (will be filtered by projection) // If field not found in mapping, leave it without an ID (will be filtered by projection) Field::new(field.name(), field.data_type().clone(), field.is_nullable()) @@ -2712,15 +2712,14 @@ message schema { // Step 4: Verify we got 199 rows (not 200) let total_rows: usize = result.iter().map(|b| b.num_rows()).sum(); - println!("Total rows read: {}", total_rows); + println!("Total rows read: {total_rows}"); println!("Expected: 199 rows (deleted row 199 which had id=200)"); // This assertion will FAIL before the fix and PASS after the fix assert_eq!( total_rows, 199, - "Expected 199 rows after deleting row 199, but got {} rows. \ - The bug causes position deletes in later row groups to be ignored.", - total_rows + "Expected 199 rows after deleting row 199, but got {total_rows} rows. \ + The bug causes position deletes in later row groups to be ignored." 
); // Verify the deleted row (id=200) is not present @@ -2931,16 +2930,15 @@ message schema { // Row group 1 has 100 rows (ids 101-200), minus 1 delete (id=200) = 99 rows let total_rows: usize = result.iter().map(|b| b.num_rows()).sum(); - println!("Total rows read from row group 1: {}", total_rows); + println!("Total rows read from row group 1: {total_rows}"); println!("Expected: 99 rows (row group 1 has 100 rows, 1 delete at position 199)"); // This assertion will FAIL before the fix and PASS after the fix assert_eq!( total_rows, 99, - "Expected 99 rows from row group 1 after deleting position 199, but got {} rows. \ + "Expected 99 rows from row group 1 after deleting position 199, but got {total_rows} rows. \ The bug causes position deletes to be lost when advance_to() is followed by next() \ - when skipping unselected row groups.", - total_rows + when skipping unselected row groups." ); // Verify the deleted row (id=200) is not present @@ -3222,7 +3220,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 2], @@ -3319,7 +3317,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 3], @@ -3405,7 +3403,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 2, 3], @@ -3505,7 +3503,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 2], @@ -3546,7 +3544,7 @@ message schema { assert_eq!(all_values.len(), 6); for i in 0..6 { - assert_eq!(all_names[i], format!("name_{}", i)); + assert_eq!(all_names[i], format!("name_{i}")); assert_eq!(all_values[i], i as i32); } } @@ -3634,7 +3632,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 2], @@ -3730,7 +3728,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 5, 2], @@ -3839,7 +3837,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/1.parquet", table_location), + data_file_path: format!("{table_location}/1.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 2, 3], @@ -3978,7 +3976,7 @@ message schema { start: 0, length: 0, record_count: None, - data_file_path: format!("{}/data.parquet", table_location), + data_file_path: format!("{table_location}/data.parquet"), data_file_format: DataFileFormat::Parquet, schema: schema.clone(), project_field_ids: vec![1, 2], diff 
--git a/crates/iceberg/src/arrow/record_batch_projector.rs b/crates/iceberg/src/arrow/record_batch_projector.rs index 45de0212e8..7028eee961 100644 --- a/crates/iceberg/src/arrow/record_batch_projector.rs +++ b/crates/iceberg/src/arrow/record_batch_projector.rs @@ -133,25 +133,24 @@ impl RecordBatchProjector { { for (pos, field) in fields.iter().enumerate() { let id = field_id_fetch_func(field)?; - if let Some(id) = id { - if target_field_id == id { - index_vec.push(pos); - return Ok(Some(field.clone())); - } + if let Some(id) = id + && target_field_id == id + { + index_vec.push(pos); + return Ok(Some(field.clone())); } - if let DataType::Struct(inner) = field.data_type() { - if searchable_field_func(field) { - if let Some(res) = Self::fetch_field_index( - inner, - index_vec, - target_field_id, - field_id_fetch_func, - searchable_field_func, - )? { - index_vec.push(pos); - return Ok(Some(res)); - } - } + if let DataType::Struct(inner) = field.data_type() + && searchable_field_func(field) + && let Some(res) = Self::fetch_field_index( + inner, + index_vec, + target_field_id, + field_id_fetch_func, + searchable_field_func, + )? + { + index_vec.push(pos); + return Ok(Some(res)); } } Ok(None) diff --git a/crates/iceberg/src/arrow/record_batch_transformer.rs b/crates/iceberg/src/arrow/record_batch_transformer.rs index a20adb6a5a..d79299f765 100644 --- a/crates/iceberg/src/arrow/record_batch_transformer.rs +++ b/crates/iceberg/src/arrow/record_batch_transformer.rs @@ -492,7 +492,7 @@ impl RecordBatchTransformer { let this_field_id = field_id_str.parse().map_err(|e| { Error::new( ErrorKind::DataInvalid, - format!("field id not parseable as an i32: {}", e), + format!("field id not parseable as an i32: {e}"), ) })?; @@ -615,7 +615,7 @@ impl RecordBatchTransformer { (dt, _) => { return Err(Error::new( ErrorKind::Unexpected, - format!("unexpected target column type {}", dt), + format!("unexpected target column type {dt}"), )); } }) diff --git a/crates/iceberg/src/arrow/value.rs b/crates/iceberg/src/arrow/value.rs index f1cf225bb4..eb675dff31 100644 --- a/crates/iceberg/src/arrow/value.rs +++ b/crates/iceberg/src/arrow/value.rs @@ -258,15 +258,15 @@ impl SchemaWithPartnerVisitor for ArrowArrayToIcebergStructConverter { "The partner is not a decimal128 array", ) })?; - if let DataType::Decimal128(arrow_precision, arrow_scale) = array.data_type() { - if *arrow_precision as u32 != *precision || *arrow_scale as u32 != *scale { - return Err(Error::new( - ErrorKind::DataInvalid, - format!( - "The precision or scale ({arrow_precision},{arrow_scale}) of arrow decimal128 array is not compatible with iceberg decimal type ({precision},{scale})" - ), - )); - } + if let DataType::Decimal128(arrow_precision, arrow_scale) = array.data_type() + && (*arrow_precision as u32 != *precision || *arrow_scale as u32 != *scale) + { + return Err(Error::new( + ErrorKind::DataInvalid, + format!( + "The precision or scale ({arrow_precision},{arrow_scale}) of arrow decimal128 array is not compatible with iceberg decimal type ({precision},{scale})" + ), + )); } Ok(array.iter().map(|v| v.map(Literal::decimal)).collect()) } @@ -348,10 +348,10 @@ impl SchemaWithPartnerVisitor for ArrowArrayToIcebergStructConverter { } else if let Some(array) = partner.as_any().downcast_ref::() { Ok(array.iter().map(|v| v.map(Literal::string)).collect()) } else { - return Err(Error::new( + Err(Error::new( ErrorKind::DataInvalid, "The partner is not a string array", - )); + )) } } PrimitiveType::Uuid => { @@ -415,10 +415,10 @@ impl 
SchemaWithPartnerVisitor for ArrowArrayToIcebergStructConverter { .map(|v| v.map(|v| Literal::binary(v.to_vec()))) .collect()) } else { - return Err(Error::new( + Err(Error::new( ErrorKind::DataInvalid, "The partner is not a binary array", - )); + )) } } } diff --git a/crates/iceberg/src/catalog/mod.rs b/crates/iceberg/src/catalog/mod.rs index 27d5edaedb..f3a521379e 100644 --- a/crates/iceberg/src/catalog/mod.rs +++ b/crates/iceberg/src/catalog/mod.rs @@ -1000,13 +1000,13 @@ mod _serde_set_statistics { snapshot_id, statistics, } = SetStatistics::deserialize(deserializer)?; - if let Some(snapshot_id) = snapshot_id { - if snapshot_id != statistics.snapshot_id { - return Err(serde::de::Error::custom(format!( - "Snapshot id to set {snapshot_id} does not match the statistics file snapshot id {}", - statistics.snapshot_id - ))); - } + if let Some(snapshot_id) = snapshot_id + && snapshot_id != statistics.snapshot_id + { + return Err(serde::de::Error::custom(format!( + "Snapshot id to set {snapshot_id} does not match the statistics file snapshot id {}", + statistics.snapshot_id + ))); } Ok(statistics) diff --git a/crates/iceberg/src/delete_vector.rs b/crates/iceberg/src/delete_vector.rs index f382bf079e..df8a10193c 100644 --- a/crates/iceberg/src/delete_vector.rs +++ b/crates/iceberg/src/delete_vector.rs @@ -36,7 +36,7 @@ impl DeleteVector { } } - pub fn iter(&self) -> DeleteVectorIterator { + pub fn iter(&self) -> DeleteVectorIterator<'_> { let outer = self.inner.bitmaps(); DeleteVectorIterator { outer, inner: None } } @@ -93,10 +93,10 @@ impl Iterator for DeleteVectorIterator<'_> { type Item = u64; fn next(&mut self) -> Option { - if let Some(inner) = &mut self.inner { - if let Some(inner_next) = inner.bitmap_iter.next() { - return Some(u64::from(inner.high_bits) << 32 | u64::from(inner_next)); - } + if let Some(inner) = &mut self.inner + && let Some(inner_next) = inner.bitmap_iter.next() + { + return Some(u64::from(inner.high_bits) << 32 | u64::from(inner_next)); } if let Some((high_bits, next_bitmap)) = self.outer.next() { diff --git a/crates/iceberg/src/expr/visitors/manifest_evaluator.rs b/crates/iceberg/src/expr/visitors/manifest_evaluator.rs index abbd136cb1..770163ae95 100644 --- a/crates/iceberg/src/expr/visitors/manifest_evaluator.rs +++ b/crates/iceberg/src/expr/visitors/manifest_evaluator.rs @@ -161,10 +161,10 @@ impl BoundPredicateVisitor for ManifestFilterVisitor<'_> { _predicate: &BoundPredicate, ) -> crate::Result { let field = self.field_summary_for_reference(reference); - if let Some(contains_nan) = field.contains_nan { - if !contains_nan { - return ROWS_CANNOT_MATCH; - } + if let Some(contains_nan) = field.contains_nan + && !contains_nan + { + return ROWS_CANNOT_MATCH; } if ManifestFilterVisitor::are_all_null(field, &reference.field().field_type) { @@ -389,16 +389,16 @@ impl BoundPredicateVisitor for ManifestFilterVisitor<'_> { return ROWS_MIGHT_MATCH; } - if prefix.as_bytes().eq(&lower_bound[..prefix_len]) { - if let Some(upper_bound) = &field.upper_bound { - // if upper is shorter than the prefix then upper can't start with the prefix - if prefix_len > upper_bound.len() { - return ROWS_MIGHT_MATCH; - } + if prefix.as_bytes().eq(&lower_bound[..prefix_len]) + && let Some(upper_bound) = &field.upper_bound + { + // if upper is shorter than the prefix then upper can't start with the prefix + if prefix_len > upper_bound.len() { + return ROWS_MIGHT_MATCH; + } - if prefix.as_bytes().eq(&upper_bound[..prefix_len]) { - return ROWS_CANNOT_MATCH; - } + if 
prefix.as_bytes().eq(&upper_bound[..prefix_len]) { + return ROWS_CANNOT_MATCH; } } } diff --git a/crates/iceberg/src/expr/visitors/page_index_evaluator.rs b/crates/iceberg/src/expr/visitors/page_index_evaluator.rs index 3745d94d18..ae3a906275 100644 --- a/crates/iceberg/src/expr/visitors/page_index_evaluator.rs +++ b/crates/iceberg/src/expr/visitors/page_index_evaluator.rs @@ -547,16 +547,16 @@ impl BoundPredicateVisitor for PageIndexEvaluator<'_> { return Ok(false); } - if let Some(min) = min { - if min.gt(datum) { - return Ok(false); - } + if let Some(min) = min + && min.gt(datum) + { + return Ok(false); } - if let Some(max) = max { - if max.lt(datum) { - return Ok(false); - } + if let Some(max) = max + && max.lt(datum) + { + return Ok(false); } Ok(true) diff --git a/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs b/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs index e9bed775ef..7c652e2068 100644 --- a/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs +++ b/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs @@ -129,10 +129,10 @@ impl<'a> StrictMetricsEvaluator<'a> { self.upper_bound(field_id) }; - if let Some(bound) = bound { - if cmp_fn(bound, datum) { - return ROWS_MUST_MATCH; - } + if let Some(bound) = bound + && cmp_fn(bound, datum) + { + return ROWS_MUST_MATCH; } ROWS_MIGHT_NOT_MATCH @@ -219,10 +219,10 @@ impl BoundPredicateVisitor for StrictMetricsEvaluator<'_> { ) -> crate::Result { let field_id = reference.field().id; - if let Some(&nan_count) = self.nan_count(field_id) { - if nan_count == 0 { - return ROWS_MUST_MATCH; - } + if let Some(&nan_count) = self.nan_count(field_id) + && nan_count == 0 + { + return ROWS_MUST_MATCH; } if self.contains_nulls_only(field_id) { @@ -258,10 +258,10 @@ impl BoundPredicateVisitor for StrictMetricsEvaluator<'_> { ) -> crate::Result { let field_id = reference.field().id; - if let Some(lower) = self.lower_bound(field_id) { - if lower.is_nan() { - return ROWS_MIGHT_NOT_MATCH; - } + if let Some(lower) = self.lower_bound(field_id) + && lower.is_nan() + { + return ROWS_MIGHT_NOT_MATCH; } self.visit_inequality(reference, datum, PartialOrd::gt, true) diff --git a/crates/iceberg/src/inspect/metadata_table.rs b/crates/iceberg/src/inspect/metadata_table.rs index 92571db181..d5e9d60869 100644 --- a/crates/iceberg/src/inspect/metadata_table.rs +++ b/crates/iceberg/src/inspect/metadata_table.rs @@ -71,12 +71,12 @@ impl<'a> MetadataTable<'a> { } /// Get the snapshots table. - pub fn snapshots(&self) -> SnapshotsTable { + pub fn snapshots(&self) -> SnapshotsTable<'_> { SnapshotsTable::new(self.0) } /// Get the manifests table. 
- pub fn manifests(&self) -> ManifestsTable { + pub fn manifests(&self) -> ManifestsTable<'_> { ManifestsTable::new(self.0) } } diff --git a/crates/iceberg/src/io/storage.rs b/crates/iceberg/src/io/storage.rs index d5f2ad8fab..2300c42c01 100644 --- a/crates/iceberg/src/io/storage.rs +++ b/crates/iceberg/src/io/storage.rs @@ -175,7 +175,7 @@ impl Storage { } else { Err(Error::new( ErrorKind::DataInvalid, - format!("Invalid gcs url: {}, should start with {}", path, prefix), + format!("Invalid gcs url: {path}, should start with {prefix}"), )) } } @@ -190,7 +190,7 @@ impl Storage { } else { Err(Error::new( ErrorKind::DataInvalid, - format!("Invalid oss url: {}, should start with {}", path, prefix), + format!("Invalid oss url: {path}, should start with {prefix}"), )) } } diff --git a/crates/iceberg/src/io/storage_azdls.rs b/crates/iceberg/src/io/storage_azdls.rs index fe12167f6f..5abb0cd6e0 100644 --- a/crates/iceberg/src/io/storage_azdls.rs +++ b/crates/iceberg/src/io/storage_azdls.rs @@ -165,7 +165,7 @@ impl FromStr for AzureStorageScheme { "wasbs" => Ok(AzureStorageScheme::Wasbs), _ => Err(Error::new( ErrorKind::DataInvalid, - format!("Unexpected Azure Storage scheme: {}", s), + format!("Unexpected Azure Storage scheme: {s}"), )), } } @@ -397,11 +397,11 @@ mod tests { let config = azdls_config_parse(properties); match expected { Some(expected_config) => { - assert!(config.is_ok(), "Test case {} failed: {:?}", name, config); - assert_eq!(config.unwrap(), expected_config, "Test case: {}", name); + assert!(config.is_ok(), "Test case {name} failed: {config:?}"); + assert_eq!(config.unwrap(), expected_config, "Test case: {name}"); } None => { - assert!(config.is_err(), "Test case {} expected error.", name); + assert!(config.is_err(), "Test case {name} expected error."); } } } @@ -495,14 +495,14 @@ mod tests { let result = azdls_create_operator(input.0, &input.1, &input.2); match expected { Some((expected_filesystem, expected_path)) => { - assert!(result.is_ok(), "Test case {} failed: {:?}", name, result); + assert!(result.is_ok(), "Test case {name} failed: {result:?}"); let (op, relative_path) = result.unwrap(); assert_eq!(op.info().name(), expected_filesystem); assert_eq!(relative_path, expected_path); } None => { - assert!(result.is_err(), "Test case {} expected error.", name); + assert!(result.is_err(), "Test case {name} expected error."); } } } @@ -543,11 +543,11 @@ mod tests { let result = input.parse::(); match expected { Some(expected_path) => { - assert!(result.is_ok(), "Test case {} failed: {:?}", name, result); - assert_eq!(result.unwrap(), expected_path, "Test case: {}", name); + assert!(result.is_ok(), "Test case {name} failed: {result:?}"); + assert_eq!(result.unwrap(), expected_path, "Test case: {name}"); } None => { - assert!(result.is_err(), "Test case {} expected error.", name); + assert!(result.is_err(), "Test case {name} expected error."); } } } @@ -593,7 +593,7 @@ mod tests { for (name, path, expected) in test_cases { let endpoint = path.as_endpoint(); - assert_eq!(endpoint, expected, "Test case: {}", name); + assert_eq!(endpoint, expected, "Test case: {name}"); } } } diff --git a/crates/iceberg/src/io/storage_gcs.rs b/crates/iceberg/src/io/storage_gcs.rs index 8c3d914c86..7718df603f 100644 --- a/crates/iceberg/src/io/storage_gcs.rs +++ b/crates/iceberg/src/io/storage_gcs.rs @@ -71,20 +71,20 @@ pub(crate) fn gcs_config_parse(mut m: HashMap) -> Result Result let bucket = url.host_str().ok_or_else(|| { Error::new( ErrorKind::DataInvalid, - format!("Invalid gcs url: {}, bucket is 
required", path), + format!("Invalid gcs url: {path}, bucket is required"), ) })?; diff --git a/crates/iceberg/src/io/storage_oss.rs b/crates/iceberg/src/io/storage_oss.rs index 8bfffc6ca8..e82dda23a5 100644 --- a/crates/iceberg/src/io/storage_oss.rs +++ b/crates/iceberg/src/io/storage_oss.rs @@ -56,7 +56,7 @@ pub(crate) fn oss_config_build(cfg: &OssConfig, path: &str) -> Result let bucket = url.host_str().ok_or_else(|| { Error::new( ErrorKind::DataInvalid, - format!("Invalid oss url: {}, missing bucket", path), + format!("Invalid oss url: {path}, missing bucket"), ) })?; diff --git a/crates/iceberg/src/io/storage_s3.rs b/crates/iceberg/src/io/storage_s3.rs index fcf9afed1f..f069e0e2f9 100644 --- a/crates/iceberg/src/io/storage_s3.rs +++ b/crates/iceberg/src/io/storage_s3.rs @@ -134,20 +134,20 @@ pub(crate) fn s3_config_parse(mut m: HashMap) -> Result Deserialize<'de> for StructType { let type_val: String = map.next_value()?; if type_val != "struct" { return Err(serde::de::Error::custom(format!( - "expected type 'struct', got '{}'", - type_val + "expected type 'struct', got '{type_val}'" ))); } } diff --git a/crates/iceberg/src/spec/manifest/writer.rs b/crates/iceberg/src/spec/manifest/writer.rs index ebb0590bcf..389ac7a1fd 100644 --- a/crates/iceberg/src/spec/manifest/writer.rs +++ b/crates/iceberg/src/spec/manifest/writer.rs @@ -388,10 +388,10 @@ impl ManifestWriter { self.existing_rows += entry.data_file.record_count; } } - if entry.is_alive() { - if let Some(seq_num) = entry.sequence_number { - self.min_seq_num = Some(self.min_seq_num.map_or(seq_num, |v| min(v, seq_num))); - } + if entry.is_alive() + && let Some(seq_num) = entry.sequence_number + { + self.min_seq_num = Some(self.min_seq_num.map_or(seq_num, |v| min(v, seq_num))); } self.manifest_entries.push(entry); Ok(()) diff --git a/crates/iceberg/src/spec/schema/prune_columns.rs b/crates/iceberg/src/spec/schema/prune_columns.rs index 5a2f0b50fc..14f1bfd25f 100644 --- a/crates/iceberg/src/spec/schema/prune_columns.rs +++ b/crates/iceberg/src/spec/schema/prune_columns.rs @@ -110,19 +110,19 @@ impl SchemaVisitor for PruneColumn { if self.select_full_types { Ok(Some(*field.field_type.clone())) } else if field.field_type.is_struct() { - return Ok(Some(Type::Struct(PruneColumn::project_selected_struct( + Ok(Some(Type::Struct(PruneColumn::project_selected_struct( value, - )?))); + )?))) } else if !field.field_type.is_nested() { - return Ok(Some(*field.field_type.clone())); + Ok(Some(*field.field_type.clone())) } else { - return Err(Error::new( + Err(Error::new( ErrorKind::DataInvalid, "Can't project list or map field directly when not selecting full type." 
.to_string(), ) .with_context("field_id", field.id.to_string()) - .with_context("field_type", field.field_type.to_string())); + .with_context("field_type", field.field_type.to_string())) } } else { Ok(value) @@ -174,20 +174,20 @@ impl SchemaVisitor for PruneColumn { Ok(Some(Type::List(list.clone()))) } else if list.element_field.field_type.is_struct() { let projected_struct = PruneColumn::project_selected_struct(value).unwrap(); - return Ok(Some(Type::List(PruneColumn::project_list( + Ok(Some(Type::List(PruneColumn::project_list( list, Type::Struct(projected_struct), - )?))); + )?))) } else if list.element_field.field_type.is_primitive() { - return Ok(Some(Type::List(list.clone()))); + Ok(Some(Type::List(list.clone()))) } else { - return Err(Error::new( + Err(Error::new( ErrorKind::DataInvalid, format!( "Cannot explicitly project List or Map types, List element {} of type {} was selected", list.element_field.id, list.element_field.field_type ), - )); + )) } } else if let Some(result) = value { Ok(Some(Type::List(PruneColumn::project_list(list, result)?))) @@ -208,26 +208,26 @@ impl SchemaVisitor for PruneColumn { } else if map.value_field.field_type.is_struct() { let projected_struct = PruneColumn::project_selected_struct(Some(value.unwrap())).unwrap(); - return Ok(Some(Type::Map(PruneColumn::project_map( + Ok(Some(Type::Map(PruneColumn::project_map( map, Type::Struct(projected_struct), - )?))); + )?))) } else if map.value_field.field_type.is_primitive() { - return Ok(Some(Type::Map(map.clone()))); + Ok(Some(Type::Map(map.clone()))) } else { - return Err(Error::new( + Err(Error::new( ErrorKind::DataInvalid, format!( "Cannot explicitly project List or Map types, Map value {} of type {} was selected", map.value_field.id, map.value_field.field_type ), - )); + )) } } else if let Some(value_result) = value { - return Ok(Some(Type::Map(PruneColumn::project_map( + Ok(Some(Type::Map(PruneColumn::project_map( map, value_result, - )?))); + )?))) } else if self.selected.contains(&map.key_field.id) { Ok(Some(Type::Map(map.clone()))) } else { diff --git a/crates/iceberg/src/spec/table_metadata.rs b/crates/iceberg/src/spec/table_metadata.rs index 06b32cc847..48b715da59 100644 --- a/crates/iceberg/src/spec/table_metadata.rs +++ b/crates/iceberg/src/spec/table_metadata.rs @@ -390,18 +390,18 @@ impl TableMetadata { } fn construct_refs(&mut self) { - if let Some(current_snapshot_id) = self.current_snapshot_id { - if !self.refs.contains_key(MAIN_BRANCH) { - self.refs - .insert(MAIN_BRANCH.to_string(), SnapshotReference { - snapshot_id: current_snapshot_id, - retention: SnapshotRetention::Branch { - min_snapshots_to_keep: None, - max_snapshot_age_ms: None, - max_ref_age_ms: None, - }, - }); - } + if let Some(current_snapshot_id) = self.current_snapshot_id + && !self.refs.contains_key(MAIN_BRANCH) + { + self.refs + .insert(MAIN_BRANCH.to_string(), SnapshotReference { + snapshot_id: current_snapshot_id, + retention: SnapshotRetention::Branch { + min_snapshots_to_keep: None, + max_snapshot_age_ms: None, + max_ref_age_ms: None, + }, + }); } } @@ -572,17 +572,17 @@ impl TableMetadata { let main_ref = self.refs.get(MAIN_BRANCH); if self.current_snapshot_id.is_some() { - if let Some(main_ref) = main_ref { - if main_ref.snapshot_id != self.current_snapshot_id.unwrap_or_default() { - return Err(Error::new( - ErrorKind::DataInvalid, - format!( - "Current snapshot id does not match main branch ({:?} != {:?})", - self.current_snapshot_id.unwrap_or_default(), - main_ref.snapshot_id - ), - )); - } + if let Some(main_ref) = 
main_ref + && main_ref.snapshot_id != self.current_snapshot_id.unwrap_or_default() + { + return Err(Error::new( + ErrorKind::DataInvalid, + format!( + "Current snapshot id does not match main branch ({:?} != {:?})", + self.current_snapshot_id.unwrap_or_default(), + main_ref.snapshot_id + ), + )); } } else if main_ref.is_some() { return Err(Error::new( @@ -606,22 +606,21 @@ impl TableMetadata { )); } - if self.format_version >= FormatVersion::V2 { - if let Some(snapshot) = self + if self.format_version >= FormatVersion::V2 + && let Some(snapshot) = self .snapshots .values() .find(|snapshot| snapshot.sequence_number() > self.last_sequence_number) - { - return Err(Error::new( - ErrorKind::DataInvalid, - format!( - "Invalid snapshot with id {} and sequence number {} greater than last sequence number {}", - snapshot.snapshot_id(), - snapshot.sequence_number(), - self.last_sequence_number - ), - )); - } + { + return Err(Error::new( + ErrorKind::DataInvalid, + format!( + "Invalid snapshot with id {} and sequence number {} greater than last sequence number {}", + snapshot.snapshot_id(), + snapshot.sequence_number(), + self.last_sequence_number + ), + )); } Ok(()) diff --git a/crates/iceberg/src/spec/transform.rs b/crates/iceberg/src/spec/transform.rs index 6068716eff..354dc1889c 100644 --- a/crates/iceberg/src/spec/transform.rs +++ b/crates/iceberg/src/spec/transform.rs @@ -711,10 +711,10 @@ impl Transform { PredicateOperator::GreaterThan => Some(PredicateOperator::GreaterThanOrEq), PredicateOperator::StartsWith => match datum.literal() { PrimitiveLiteral::String(s) => { - if let Some(w) = width { - if s.len() == w as usize { - return Some(PredicateOperator::Eq); - }; + if let Some(w) = width + && s.len() == w as usize + { + return Some(PredicateOperator::Eq); }; Some(*op) } @@ -757,47 +757,45 @@ impl Transform { _ => false, }; - if should_adjust { - if let &PrimitiveLiteral::Int(v) = transformed.literal() { - match op { - PredicateOperator::LessThan - | PredicateOperator::LessThanOrEq - | PredicateOperator::In => { - if v < 0 { + if should_adjust && let &PrimitiveLiteral::Int(v) = transformed.literal() { + match op { + PredicateOperator::LessThan + | PredicateOperator::LessThanOrEq + | PredicateOperator::In => { + if v < 0 { + // # TODO + // An ugly hack to fix. Refine the increment and decrement logic later. + match self { + Transform::Day => { + return Some(AdjustedProjection::Single(Datum::date(v + 1))); + } + _ => { + return Some(AdjustedProjection::Single(Datum::int(v + 1))); + } + } + }; + } + PredicateOperator::Eq => { + if v < 0 { + let new_set = FnvHashSet::from_iter(vec![ + transformed.to_owned(), // # TODO // An ugly hack to fix. Refine the increment and decrement logic later. - match self { - Transform::Day => { - return Some(AdjustedProjection::Single(Datum::date(v + 1))); + { + match self { + Transform::Day => Datum::date(v + 1), + _ => Datum::int(v + 1), } - _ => { - return Some(AdjustedProjection::Single(Datum::int(v + 1))); - } - } - }; - } - PredicateOperator::Eq => { - if v < 0 { - let new_set = FnvHashSet::from_iter(vec![ - transformed.to_owned(), - // # TODO - // An ugly hack to fix. Refine the increment and decrement logic later. 
- { - match self { - Transform::Day => Datum::date(v + 1), - _ => Datum::int(v + 1), - } - }, - ]); - return Some(AdjustedProjection::Set(new_set)); - } - } - _ => { - return None; + }, + ]); + return Some(AdjustedProjection::Set(new_set)); } } - }; - } + _ => { + return None; + } + } + }; None } diff --git a/crates/iceberg/src/spec/values/tests.rs b/crates/iceberg/src/spec/values/tests.rs index 0e99d44dfe..73343a9a1a 100644 --- a/crates/iceberg/src/spec/values/tests.rs +++ b/crates/iceberg/src/spec/values/tests.rs @@ -447,7 +447,7 @@ fn check_raw_literal_bytes_error_via_avro(input_bytes: Vec, expected_type: & let avro_value = Value::Bytes(input_bytes); let raw_literal: _serde::RawLiteral = apache_avro::from_value(&avro_value).unwrap(); let result = raw_literal.try_into(expected_type); - assert!(result.is_err(), "Expected error but got: {:?}", result); + assert!(result.is_err(), "Expected error but got: {result:?}"); } #[test] diff --git a/crates/iceberg/src/spec/view_metadata_builder.rs b/crates/iceberg/src/spec/view_metadata_builder.rs index 9f542a7c61..38041ca625 100644 --- a/crates/iceberg/src/spec/view_metadata_builder.rs +++ b/crates/iceberg/src/spec/view_metadata_builder.rs @@ -478,10 +478,10 @@ impl ViewMetadataBuilder { // as it might panic if the metadata is invalid. self.metadata.validate()?; - if let Some(previous) = self.previous_view_version.take() { - if !allow_replace_drop_dialects(&self.metadata.properties) { - require_no_dialect_dropped(&previous, self.metadata.current_version())?; - } + if let Some(previous) = self.previous_view_version.take() + && !allow_replace_drop_dialects(&self.metadata.properties) + { + require_no_dialect_dropped(&previous, self.metadata.current_version())?; } let _expired_versions = self.expire_versions(); diff --git a/crates/iceberg/src/transaction/mod.rs b/crates/iceberg/src/transaction/mod.rs index 4116264a14..8ddaa26698 100644 --- a/crates/iceberg/src/transaction/mod.rs +++ b/crates/iceberg/src/transaction/mod.rs @@ -518,7 +518,7 @@ mod test_row_lineage { fn file_with_rows(record_count: u64) -> DataFile { DataFileBuilder::default() .content(DataContentType::Data) - .file_path(format!("test/{}.parquet", record_count)) + .file_path(format!("test/{record_count}.parquet")) .file_format(DataFileFormat::Parquet) .file_size_in_bytes(100) .record_count(record_count) diff --git a/crates/iceberg/src/transaction/snapshot.rs b/crates/iceberg/src/transaction/snapshot.rs index d59828ce31..c8bf26a174 100644 --- a/crates/iceberg/src/transaction/snapshot.rs +++ b/crates/iceberg/src/transaction/snapshot.rs @@ -276,13 +276,13 @@ impl<'a> SnapshotProducer<'a> { "Partition field should only be primitive type.", ) })?; - if let Some(value) = value { - if !field.compatible(&value.as_primitive_literal().unwrap()) { - return Err(Error::new( - ErrorKind::DataInvalid, - "Partition value is not compatible partition type", - )); - } + if let Some(value) = value + && !field.compatible(&value.as_primitive_literal().unwrap()) + { + return Err(Error::new( + ErrorKind::DataInvalid, + "Partition value is not compatible partition type", + )); } } Ok(()) diff --git a/crates/iceberg/src/writer/file_writer/rolling_writer.rs b/crates/iceberg/src/writer/file_writer/rolling_writer.rs index 06246ab660..a93e494d48 100644 --- a/crates/iceberg/src/writer/file_writer/rolling_writer.rs +++ b/crates/iceberg/src/writer/file_writer/rolling_writer.rs @@ -197,18 +197,18 @@ where ); } - if self.should_roll() { - if let Some(inner) = self.inner.take() { - // close the current writer, roll to a new 
file - self.data_file_builders.extend(inner.close().await?); - - // start a new writer - self.inner = Some( - self.inner_builder - .build(self.new_output_file(partition_key)?) - .await?, - ); - } + if self.should_roll() + && let Some(inner) = self.inner.take() + { + // close the current writer, roll to a new file + self.data_file_builders.extend(inner.close().await?); + + // start a new writer + self.inner = Some( + self.inner_builder + .build(self.new_output_file(partition_key)?) + .await?, + ); } // write the input diff --git a/crates/iceberg/tests/file_io_gcs_test.rs b/crates/iceberg/tests/file_io_gcs_test.rs index 161285ae6f..9fbcdadd0e 100644 --- a/crates/iceberg/tests/file_io_gcs_test.rs +++ b/crates/iceberg/tests/file_io_gcs_test.rs @@ -68,7 +68,7 @@ mod tests { FileIOBuilder::new("gcs") .with_props(vec![ - (GCS_SERVICE_PATH, format!("http://{}", addr)), + (GCS_SERVICE_PATH, format!("http://{addr}")), (GCS_NO_AUTH, "true".to_string()), ]) .build() @@ -81,13 +81,13 @@ mod tests { bucket_data.insert("name", name); let client = reqwest::Client::new(); - let endpoint = format!("http://{}/storage/v1/b", server_addr); + let endpoint = format!("http://{server_addr}/storage/v1/b"); client.post(endpoint).json(&bucket_data).send().await?; Ok(()) } fn get_gs_path() -> String { - format!("gs://{}", FAKE_GCS_BUCKET) + format!("gs://{FAKE_GCS_BUCKET}") } #[tokio::test] diff --git a/crates/integrations/datafusion/src/physical_plan/repartition.rs b/crates/integrations/datafusion/src/physical_plan/repartition.rs index 8ad87fd1cc..2d1d7f862c 100644 --- a/crates/integrations/datafusion/src/physical_plan/repartition.rs +++ b/crates/integrations/datafusion/src/physical_plan/repartition.rs @@ -159,9 +159,8 @@ fn determine_partitioning_strategy( // Case 2: Partitioned table missing _partition column (normally this should not happen) (true, Err(_)) => Err(DataFusionError::Plan(format!( - "Partitioned table input missing {} column. \ - Ensure projection happens before repartitioning.", - PROJECTED_PARTITION_VALUE_COLUMN + "Partitioned table input missing {PROJECTED_PARTITION_VALUE_COLUMN} column. \ + Ensure projection happens before repartitioning." 
))), // Case 3: Unpartitioned table, always use RoundRobinBatch @@ -508,8 +507,7 @@ mod tests { assert!( column_names.contains(&PROJECTED_PARTITION_VALUE_COLUMN.to_string()), - "Should use _partition column, got: {:?}", - column_names + "Should use _partition column, got: {column_names:?}" ); } _ => panic!("Expected Hash partitioning with Identity transform"), @@ -733,8 +731,7 @@ mod tests { .collect(); assert!( column_names.contains(&PROJECTED_PARTITION_VALUE_COLUMN.to_string()), - "Should use _partition column for mixed transforms with Identity, got: {:?}", - column_names + "Should use _partition column for mixed transforms with Identity, got: {column_names:?}" ); } _ => panic!("Expected Hash partitioning for table with identity transforms"), diff --git a/crates/integrations/datafusion/src/physical_plan/sort.rs b/crates/integrations/datafusion/src/physical_plan/sort.rs index 2a57e16e43..ede2547535 100644 --- a/crates/integrations/datafusion/src/physical_plan/sort.rs +++ b/crates/integrations/datafusion/src/physical_plan/sort.rs @@ -53,8 +53,7 @@ pub(crate) fn sort_by_partition(input: Arc) -> DFResult Result<()> { let file_io = table.file_io(); // List files under each expected partition path - let electronics_path = format!("{}/data/category=electronics", table_location); - let books_path = format!("{}/data/category=books", table_location); - let clothing_path = format!("{}/data/category=clothing", table_location); + let electronics_path = format!("{table_location}/data/category=electronics"); + let books_path = format!("{table_location}/data/category=books"); + let clothing_path = format!("{table_location}/data/category=clothing"); // Verify partition directories exist and contain data files assert!( file_io.exists(&electronics_path).await?, - "Expected partition directory: {}", - electronics_path + "Expected partition directory: {electronics_path}" ); assert!( file_io.exists(&books_path).await?, - "Expected partition directory: {}", - books_path + "Expected partition directory: {books_path}" ); assert!( file_io.exists(&clothing_path).await?, - "Expected partition directory: {}", - clothing_path + "Expected partition directory: {clothing_path}" ); Ok(()) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index ff7d1f7fbb..4b20d68e44 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -20,5 +20,5 @@ # # The channel is exactly same day for our MSRV. [toolchain] -channel = "nightly-2025-03-28" +channel = "nightly-2025-06-23" components = ["rustfmt", "clippy"] From 83d0e65d2862d53e95c55871a4e539627ee95c4d Mon Sep 17 00:00:00 2001 From: Matt Butrovich Date: Fri, 5 Dec 2025 14:45:28 -0500 Subject: [PATCH 2/3] Remove stray changes from #1899. 
--- Cargo.toml | 3 --- crates/iceberg/Cargo.toml | 3 --- 2 files changed, 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 83a1f44a23..f7373c9eac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,9 +38,6 @@ repository = "https://github.com/apache/iceberg-rust" # Check the MSRV policy in README.md before changing this rust-version = "1.88" -[workspace.lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(FALSE)'] } - [workspace.dependencies] anyhow = "1.0.72" apache-avro = { version = "0.21", features = ["zstandard"] } diff --git a/crates/iceberg/Cargo.toml b/crates/iceberg/Cargo.toml index 5c95f0b89f..895a5cf5e4 100644 --- a/crates/iceberg/Cargo.toml +++ b/crates/iceberg/Cargo.toml @@ -28,9 +28,6 @@ keywords = ["iceberg"] license = { workspace = true } repository = { workspace = true } -[lints] -workspace = true - [features] default = ["storage-memory", "storage-fs", "storage-s3", "tokio"] storage-all = ["storage-memory", "storage-fs", "storage-s3", "storage-gcs"] From 237b3274c9f67abc1a1d8a0199050499fb0799aa Mon Sep 17 00:00:00 2001 From: Matt Butrovich Date: Tue, 9 Dec 2025 08:29:12 -0500 Subject: [PATCH 3/3] fix merge conflict, fix clippy again. --- crates/iceberg/src/arrow/record_batch_transformer.rs | 2 +- crates/iceberg/src/arrow/value.rs | 7 ++----- crates/iceberg/src/metadata_columns.rs | 5 ++--- crates/iceberg/src/scan/mod.rs | 6 ++---- 4 files changed, 7 insertions(+), 13 deletions(-) diff --git a/crates/iceberg/src/arrow/record_batch_transformer.rs b/crates/iceberg/src/arrow/record_batch_transformer.rs index f30d4a09c3..c4782464c1 100644 --- a/crates/iceberg/src/arrow/record_batch_transformer.rs +++ b/crates/iceberg/src/arrow/record_batch_transformer.rs @@ -582,7 +582,7 @@ impl RecordBatchTransformer { let this_field_id = field_id_str.parse().map_err(|e| { Error::new( ErrorKind::DataInvalid, - format!("field id not parseable as an i32: {}", e), + format!("field id not parseable as an i32: {e}"), ) })?; diff --git a/crates/iceberg/src/arrow/value.rs b/crates/iceberg/src/arrow/value.rs index a38c6e346d..bc123d99e8 100644 --- a/crates/iceberg/src/arrow/value.rs +++ b/crates/iceberg/src/arrow/value.rs @@ -724,10 +724,7 @@ pub(crate) fn create_primitive_array_single_element( } _ => Err(Error::new( ErrorKind::Unexpected, - format!( - "Unsupported constant type combination: {:?} with {:?}", - data_type, prim_lit - ), + format!("Unsupported constant type combination: {data_type:?} with {prim_lit:?}"), )), } } @@ -825,7 +822,7 @@ pub(crate) fn create_primitive_array_repeated( (dt, _) => { return Err(Error::new( ErrorKind::Unexpected, - format!("unexpected target column type {}", dt), + format!("unexpected target column type {dt}"), )); } }) diff --git a/crates/iceberg/src/metadata_columns.rs b/crates/iceberg/src/metadata_columns.rs index b11b5cadb2..b388e40c4a 100644 --- a/crates/iceberg/src/metadata_columns.rs +++ b/crates/iceberg/src/metadata_columns.rs @@ -74,14 +74,13 @@ pub fn get_metadata_field(field_id: i32) -> Result { Err(Error::new( ErrorKind::Unexpected, format!( - "Metadata field ID {} recognized but field definition not implemented", - field_id + "Metadata field ID {field_id} recognized but field definition not implemented" ), )) } _ => Err(Error::new( ErrorKind::Unexpected, - format!("Field ID {} is not a metadata field", field_id), + format!("Field ID {field_id} is not a metadata field"), )), } } diff --git a/crates/iceberg/src/scan/mod.rs b/crates/iceberg/src/scan/mod.rs index 24c03b0b2c..d83da8a879 100644 --- a/crates/iceberg/src/scan/mod.rs +++ 
b/crates/iceberg/src/scan/mod.rs @@ -1872,8 +1872,7 @@ pub mod tests { let file_path = string_values.value(0); assert!( file_path.ends_with(".parquet"), - "File path should end with .parquet, got: {}", - file_path + "File path should end with .parquet, got: {file_path}" ); } @@ -1981,8 +1980,7 @@ pub mod tests { for path in &file_paths { assert!( path.ends_with(".parquet"), - "All file paths should end with .parquet, got: {}", - path + "All file paths should end with .parquet, got: {path}" ); } }
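Note on the recurring rewrite: most hunks in patch 1 collapse a nested `if let` into a let chain and capture identifiers inline in format! strings. A minimal standalone sketch of both patterns follows (illustration only, not code from this series; `props`, "bucket", and "region" are hypothetical names, and the let chain assumes edition 2024, where the feature stabilized with Rust 1.88):

    use std::collections::HashMap;

    fn main() {
        let props: HashMap<String, String> =
            HashMap::from([("region".to_string(), "us-east-1".to_string())]);

        // Before: nested conditionals of the shape this series rewrites.
        if !props.contains_key("bucket") {
            if let Some(region) = props.get("region") {
                println!("falling back to region: {}", region);
            }
        }

        // After: one let chain, one level of nesting removed, and the
        // format argument captured inline instead of passed positionally.
        if !props.contains_key("bucket")
            && let Some(region) = props.get("region")
        {
            println!("falling back to region: {region}");
        }
    }

The chain form short-circuits left to right exactly like the nested version, so the rewrite is behavior-preserving; that is why it can be applied mechanically across the catalogs, evaluators, and writers touched above.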
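The other API-visible change is spelling out the elided lifetime in return types (`DeleteVectorIterator<'_>`, `SnapshotsTable<'_>`, `ManifestsTable<'_>`). A sketch of the idea with hypothetical types, assuming a toolchain that warns on the hidden borrow:

    struct Rows {
        ids: Vec<u32>,
    }

    struct RowIter<'a> {
        inner: std::slice::Iter<'a, u32>,
    }

    impl<'a> Iterator for RowIter<'a> {
        type Item = &'a u32;

        fn next(&mut self) -> Option<Self::Item> {
            self.inner.next()
        }
    }

    impl Rows {
        // `fn iter(&self) -> RowIter` also compiles, but the signature
        // hides that the returned iterator borrows `self`; newer
        // toolchains lint on that mismatch, hence the explicit
        // anonymous lifetime.
        fn iter(&self) -> RowIter<'_> {
            RowIter {
                inner: self.ids.iter(),
            }
        }
    }

    fn main() {
        let rows = Rows { ids: vec![1, 2, 3] };
        assert_eq!(rows.iter().count(), 3);
    }

The `'_` changes nothing about borrow checking; it only makes the borrow that was already there visible in the signature.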