Skip to content

Commit

Permalink
chore: fix typos (delta-io#1813)
Browse files Browse the repository at this point in the history
  • Loading branch information
xiaolou86 committed Nov 7, 2023
1 parent b3f478e commit 7090a12
Show file tree
Hide file tree
Showing 10 changed files with 15 additions and 15 deletions.
2 changes: 1 addition & 1 deletion crates/deltalake-core/src/data_catalog/storage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ impl ListingSchemaProvider {
}
}

// noramalizes a path fragement to be a valida table name in datafusion
// normalizes a path fragment to be a valid table name in datafusion
// - removes some reserved characters (-, +, ., " ")
// - lowercase ascii
fn normalize_table_name(path: &Path) -> Result<String, DataFusionError> {
Expand Down
6 changes: 3 additions & 3 deletions crates/deltalake-core/src/delta_datafusion/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,7 @@ pub(crate) fn logical_schema(
}

#[derive(Debug, Clone, Default)]
/// Used to specify if additonal metadata columns are exposed to the user
/// Used to specify if additional metadata columns are exposed to the user
pub struct DeltaScanConfigBuilder {
/// Include the source path for each record. The name of this column is determined by `file_column_name`
include_file_column: bool,
Expand Down Expand Up @@ -458,7 +458,7 @@ impl DeltaScanConfigBuilder {
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
/// Include additonal metadata columns during a [`DeltaScan`]
/// Include additional metadata columns during a [`DeltaScan`]
pub struct DeltaScanConfig {
/// Include the source path for each record
pub file_column_name: Option<String>,
Expand Down Expand Up @@ -711,7 +711,7 @@ impl TableProvider for DeltaTable {
}
}

/// A Delta table provider that enables additonal metadata columns to be included during the scan
/// A Delta table provider that enables additional metadata columns to be included during the scan
pub struct DeltaTableProvider {
snapshot: DeltaTableState,
store: ObjectStoreRef,
Expand Down
2 changes: 1 addition & 1 deletion crates/deltalake-core/src/kernel/actions/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ pub enum WriterFeatures {
DomainMetadata,
/// version 2 of checkpointing
V2Checkpoint,
/// Iceberg compatability support
/// Iceberg compatibility support
IcebergCompatV1,
/// If we do not match any other reader features
#[serde(untagged)]
Expand Down
2 changes: 1 addition & 1 deletion crates/deltalake-core/src/operations/delete.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ use crate::DeltaTable;
use super::datafusion_utils::Expression;

/// Delete Records from the Delta Table.
/// See this module's documentaiton for more information
/// See this module's documentation for more information
pub struct DeleteBuilder {
/// Which records to delete
predicate: Option<Expression>,
Expand Down
2 changes: 1 addition & 1 deletion crates/deltalake-core/src/operations/filesystem_check.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ use crate::table::state::DeltaTableState;
use crate::DeltaTable;

/// Audit the Delta Table's active files with the underlying file system.
/// See this module's documentaiton for more information
/// See this module's documentation for more information
#[derive(Debug)]
pub struct FileSystemCheckBuilder {
/// A snapshot of the to-be-checked table's state
Expand Down
2 changes: 1 addition & 1 deletion crates/deltalake-core/src/operations/update.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ impl UpdateBuilder {
self
}

/// Perform an additonal update expression during the operaton
/// Perform an additional update expression during the operation
pub fn with_update<S: Into<Column>, E: Into<Expression>>(
mut self,
column: S,
Expand Down
4 changes: 2 additions & 2 deletions crates/deltalake-core/src/protocol/parquet2_read/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -335,7 +335,7 @@ fn deserialize_add_column_page(
},
)?;
}
// FIXME suport partitionValueParsed
// FIXME support partitionValueParsed
"dataChange" => {
for_each_boolean_field_value(
actions,
Expand Down Expand Up @@ -420,7 +420,7 @@ fn deserialize_remove_column_page(
|action: &mut Remove, v: i64| action.size = Some(v),
)?;
}
// FIXME suport partitionValueParsed
// FIXME support partitionValueParsed
"partitionValues" => {
for_each_map_field_value(
&field[1..],
Expand Down
2 changes: 1 addition & 1 deletion crates/deltalake-core/src/writer/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ pub fn record_batch_from_message(
// a happy middle-road might be to compute stats for partition columns only on the initial write since we should validate partition values anyway, and compute additional stats later (at checkpoint time perhaps?).
// also this does not currently support nested partition columns and many other data types.
// TODO is this comment still valid, since we should be sure now, that the arrays where this
// gets aplied have a single unique value
// gets applied have a single unique value
pub(crate) fn stringified_partition_value(
arr: &Arc<dyn Array>,
) -> Result<Option<String>, DeltaWriterError> {
Expand Down
4 changes: 2 additions & 2 deletions crates/deltalake-core/tests/command_filesystem_check.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ async fn test_filesystem_check(storage: StorageIntegration) -> TestResult {
let remove = table.state.all_tombstones().get(file).unwrap();
assert!(remove.data_change);

// An additonal run should return an empty list of orphaned actions
// An additional run should return an empty list of orphaned actions
let op = DeltaOps::from(table);
let (table, metrics) = op.filesystem_check().await?;
assert_eq!(version + 1, table.state.version());
Expand Down Expand Up @@ -147,7 +147,7 @@ async fn test_filesystem_check_fails_for_concurrent_delete() -> TestResult {

#[tokio::test]
#[serial]
#[ignore = "should this actually fail? with conflcit resolution, we are re-trying again."]
#[ignore = "should this actually fail? with conflict resolution, we are re-trying again."]
async fn test_filesystem_check_outdated() -> TestResult {
// Validate failure when a non dry run only executes on the latest version
let context = IntegrationContext::new(StorageIntegration::Local)?;
Expand Down
4 changes: 2 additions & 2 deletions proofs/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ impl Model for AtomicRenameSys {
WriterState::RepairRenameReturned => {
match writer.rename_err {
Some(RenameErr::AlreadyExists) => {
// already reapired by other writer
// already repaired by other writer
// TODO: still need to perform the delete cleanup?
actions.push(Action::UpdateLockData(wid));
}
Expand Down Expand Up @@ -456,7 +456,7 @@ impl Model for AtomicRenameSys {
if state.blob_store_deleted(src) {
let mut writer = &mut state.writer_ctx[wid];
// source object cleaned up by another worker's repair, it's not a real
// conflict, save to assume the rename was successfull
// conflict, safe to assume the rename was successful
writer.state = WriterState::Shutdown;
} else {
let mut writer = &mut state.writer_ctx[wid];
Expand Down

0 comments on commit 7090a12

Please sign in to comment.