diff --git a/arrow-array/src/array/dictionary_array.rs b/arrow-array/src/array/dictionary_array.rs
index 38c4e019206b..acbdcb8b60fa 100644
--- a/arrow-array/src/array/dictionary_array.rs
+++ b/arrow-array/src/array/dictionary_array.rs
@@ -485,6 +485,7 @@ impl<K: ArrowDictionaryKeyType> DictionaryArray<K> {
 
     /// Returns `PrimitiveDictionaryBuilder` of this dictionary array for mutating
     /// its keys and values if the underlying data buffer is not shared by others.
+    #[allow(clippy::result_large_err)]
     pub fn into_primitive_dict_builder<V>(self) -> Result<PrimitiveDictionaryBuilder<K, V>, Self>
     where
         V: ArrowPrimitiveType,
@@ -541,6 +542,7 @@ impl<K: ArrowDictionaryKeyType> DictionaryArray<K> {
     /// assert_eq!(typed.value(1), 11);
     /// assert_eq!(typed.value(2), 21);
     /// ```
+    #[allow(clippy::result_large_err)]
     pub fn unary_mut<F, V>(self, op: F) -> Result<DictionaryArray<K>, DictionaryArray<K>>
     where
         V: ArrowPrimitiveType,
diff --git a/arrow-flight/examples/flight_sql_server.rs b/arrow-flight/examples/flight_sql_server.rs
index 657298b4a8b3..396b72f4cb22 100644
--- a/arrow-flight/examples/flight_sql_server.rs
+++ b/arrow-flight/examples/flight_sql_server.rs
@@ -112,6 +112,7 @@ static TABLES: Lazy<Vec<&str>> = Lazy::new(|| vec!["flight_sql.example.t
 pub struct FlightSqlServiceImpl {}
 
 impl FlightSqlServiceImpl {
+    #[allow(clippy::result_large_err)]
     fn check_token<T>(&self, req: &Request<T>) -> Result<(), Status> {
         let metadata = req.metadata();
         let auth = metadata.get("authorization").ok_or_else(|| {
diff --git a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
index b826ad456055..495825738aec 100644
--- a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
@@ -76,7 +76,7 @@ pub async fn run_scenario(host: &str, port: u16) -> Result {
     Ok(())
 }
 
-#[allow(clippy::unnecessary_wraps)]
+#[allow(clippy::result_large_err)]
 fn middleware_interceptor(mut req: Request<()>) -> Result<Request<()>, Status> {
     let metadata = req.metadata_mut();
     metadata.insert("x-middleware", "expected value".parse().unwrap());
diff --git a/arrow-string/src/binary_predicate.rs b/arrow-string/src/binary_predicate.rs
index b14e3c255997..590a34b10e27 100644
--- a/arrow-string/src/binary_predicate.rs
+++ b/arrow-string/src/binary_predicate.rs
@@ -21,6 +21,7 @@ use memchr::memmem::Finder;
 use std::iter::zip;
 
 /// A binary based predicate
+#[allow(clippy::large_enum_variant)]
 pub enum BinaryPredicate<'a> {
     Contains(Finder<'a>),
     StartsWith(&'a [u8]),
diff --git a/arrow-string/src/predicate.rs b/arrow-string/src/predicate.rs
index d785c22b41db..8523129dbafd 100644
--- a/arrow-string/src/predicate.rs
+++ b/arrow-string/src/predicate.rs
@@ -24,6 +24,7 @@ use regex::{Regex, RegexBuilder};
 use std::iter::zip;
 
 /// A string based predicate
+#[allow(clippy::large_enum_variant)]
 pub(crate) enum Predicate<'a> {
     Eq(&'a str),
     Contains(Finder<'a>),
diff --git a/parquet/benches/arrow_reader_clickbench.rs b/parquet/benches/arrow_reader_clickbench.rs
index 28fdda7b1893..69f3e4c1a9a3 100644
--- a/parquet/benches/arrow_reader_clickbench.rs
+++ b/parquet/benches/arrow_reader_clickbench.rs
@@ -711,7 +711,7 @@ impl ReadTest {
             .schema_descr();
 
         // Determine the correct selection ("ProjectionMask")
-        let projection_mask = if projection_columns.iter().any(|&name| name == "*") {
+        let projection_mask = if projection_columns.contains(&"*") {
            // * means all columns
             ProjectionMask::all()
         } else {
diff --git a/parquet/src/compression.rs b/parquet/src/compression.rs
index d3aea5b38a58..23c4bce50fa2 100644
--- a/parquet/src/compression.rs
+++ b/parquet/src/compression.rs
@@ -702,15 +702,11 @@ mod lz4_hadoop_codec {
         input_len -= PREFIX_LEN;
 
         if input_len < expected_compressed_size as usize {
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
-                "Not enough bytes for Hadoop frame",
-            ));
+            return Err(io::Error::other("Not enough bytes for Hadoop frame"));
         }
 
         if output_len < expected_decompressed_size as usize {
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
+            return Err(io::Error::other(
                 "Not enough bytes to hold advertised output",
             ));
         }
@@ -718,10 +714,7 @@ mod lz4_hadoop_codec {
         lz4_flex::decompress_into(&input[..expected_compressed_size as usize], output)
             .map_err(|e| ParquetError::External(Box::new(e)))?;
         if decompressed_size != expected_decompressed_size as usize {
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
-                "Unexpected decompressed size",
-            ));
+            return Err(io::Error::other("Unexpected decompressed size"));
         }
         input_len -= expected_compressed_size as usize;
         output_len -= expected_decompressed_size as usize;
@@ -736,10 +729,7 @@ mod lz4_hadoop_codec {
         if input_len == 0 {
             Ok(read_bytes)
         } else {
-            Err(io::Error::new(
-                io::ErrorKind::Other,
-                "Not all input are consumed",
-            ))
+            Err(io::Error::other("Not all input are consumed"))
        }
    }
 
diff --git a/parquet/src/errors.rs b/parquet/src/errors.rs
index 4cb1f99c3cf6..93b2c1b7e028 100644
--- a/parquet/src/errors.rs
+++ b/parquet/src/errors.rs
@@ -147,7 +147,7 @@ pub type Result<T, E = ParquetError> = result::Result<T, E>;
 
 impl From<ParquetError> for io::Error {
     fn from(e: ParquetError) -> Self {
-        io::Error::new(io::ErrorKind::Other, e)
+        io::Error::other(e)
     }
 }
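For context, here is a minimal sketch (not taken from this PR) of what `clippy::result_large_err` complains about and what the `#[allow]` attributes above silence. `BigError` is a hypothetical stand-in for a large error type such as `tonic::Status` or a whole `DictionaryArray` returned in the `Err` arm:

```rust
// Hypothetical stand-in for a large error type like `tonic::Status`.
struct BigError([u8; 512]);

// Without the attribute, `cargo clippy` warns that the `Err` variant is
// very large (by default, anything over 128 bytes), because a `Result`
// this size is moved around by value even on the `Ok` path.
#[allow(clippy::result_large_err)]
fn validate(ok: bool) -> Result<(), BigError> {
    if ok {
        Ok(())
    } else {
        Err(BigError([0; 512]))
    }
}

fn main() {
    assert!(validate(true).is_ok());
}
```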
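The `parquet` changes use `io::Error::other`, stabilized in Rust 1.74, which is shorthand for `io::Error::new(io::ErrorKind::Other, ..)`. A small sketch showing the two forms construct equivalent errors:

```rust
use std::io;

fn old_style() -> io::Error {
    io::Error::new(io::ErrorKind::Other, "Not enough bytes for Hadoop frame")
}

fn new_style() -> io::Error {
    io::Error::other("Not enough bytes for Hadoop frame")
}

fn main() {
    // Same kind and same message; only the construction is shorter.
    assert_eq!(old_style().kind(), new_style().kind());
    assert_eq!(old_style().to_string(), new_style().to_string());
}
```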
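The clickbench tweak swaps a manual `iter().any(..)` scan for slice `contains`, which states the intent directly (recent clippy versions suggest the same rewrite via `manual_contains`). A sketch with hypothetical column names:

```rust
fn main() {
    // Hypothetical projection list; "*" means "all columns" in the bench.
    let projection_columns = ["URL", "SearchPhrase", "*"];

    // The two predicates are equivalent; `contains` avoids the closure.
    assert!(projection_columns.iter().any(|&name| name == "*"));
    assert!(projection_columns.contains(&"*"));
}
```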