From 6c399c23d739cffb8999859338514f7d53c688ab Mon Sep 17 00:00:00 2001
From: Piotr Findeisen
Date: Wed, 17 Jul 2024 19:11:24 +0200
Subject: [PATCH] docs changes after moving code around

separate commit for review purposes
---
 datafusion/catalog/src/catalog.rs                 | 12 ++++--------
 datafusion/catalog/src/session.rs                 | 12 ++----------
 datafusion/catalog/src/table.rs                   |  2 +-
 datafusion/core/src/datasource/file_format/mod.rs |  2 +-
 4 files changed, 8 insertions(+), 20 deletions(-)

diff --git a/datafusion/catalog/src/catalog.rs b/datafusion/catalog/src/catalog.rs
index 1a3b14ecf2d9..da59435be86b 100644
--- a/datafusion/catalog/src/catalog.rs
+++ b/datafusion/catalog/src/catalog.rs
@@ -40,12 +40,10 @@ use datafusion_common::Result;
 ///
 /// To implement a catalog, you implement at least one of the [`CatalogProviderList`],
 /// [`CatalogProvider`] and [`SchemaProvider`] traits and register them
-/// appropriately the [`SessionContext`].
-///
-/// [`SessionContext`]: crate::execution::context::SessionContext
+/// appropriately in the `SessionContext`.
 ///
 /// DataFusion comes with a simple in-memory catalog implementation,
-/// [`MemoryCatalogProvider`], that is used by default and has no persistence.
+/// `MemoryCatalogProvider`, that is used by default and has no persistence.
 /// DataFusion does not include more complex Catalog implementations because
 /// catalog management is a key design choice for most data systems, and thus
 /// it is unlikely that any general-purpose catalog implementation will work
@@ -79,12 +77,10 @@ use datafusion_common::Result;
 /// access required to read table details (e.g. statistics).
 ///
 /// The pattern that DataFusion itself uses to plan SQL queries is to walk over
-/// the query to [find all table references],
+/// the query to find all table references,
 /// performing required remote catalog in parallel, and then plans the query
 /// using that snapshot.
 ///
-/// [find all table references]: resolve_table_references
-///
 /// # Example Catalog Implementations
 ///
 /// Here are some examples of how to implement custom catalogs:
@@ -135,7 +131,7 @@ pub trait CatalogProvider: Sync + Send {
 
     /// Removes a schema from this catalog. Implementations of this method should return
     /// errors if the schema exists but cannot be dropped. For example, in DataFusion's
-    /// default in-memory catalog, [`MemoryCatalogProvider`], a non-empty schema
+    /// default in-memory catalog, `MemoryCatalogProvider`, a non-empty schema
     /// will only be successfully dropped when `cascade` is true.
     /// This is equivalent to how DROP SCHEMA works in PostgreSQL.
     ///
diff --git a/datafusion/catalog/src/session.rs b/datafusion/catalog/src/session.rs
index 3a84ff3d26f7..cbd6be39df4d 100644
--- a/datafusion/catalog/src/session.rs
+++ b/datafusion/catalog/src/session.rs
@@ -47,9 +47,7 @@ pub trait CatalogSession: Send + Sync {
     ///
     /// This function will error for [`LogicalPlan`]s such as catalog DDL like
     /// `CREATE TABLE`, which do not have corresponding physical plans and must
-    /// be handled by another layer, typically [`SessionContext`].
-    ///
-    /// [`SessionContext`]: crate::execution::context::SessionContext
+    /// be handled by another layer, typically the `SessionContext`.
     async fn create_physical_plan(
         &self,
         logical_plan: &LogicalPlan,
@@ -58,17 +56,11 @@ pub trait CatalogSession: Send + Sync {
     /// Create a [`PhysicalExpr`] from an [`Expr`] after applying type
     /// coercion, and function rewrites.
     ///
-    /// Note: The expression is not [simplified] or otherwise optimized: `a = 1
+    /// Note: The expression is not simplified or otherwise optimized: `a = 1
     /// + 2` will not be simplified to `a = 3` as this is a more involved process.
     /// See the [expr_api] example for how to simplify expressions.
     ///
-    /// # See Also:
-    /// * [`SessionContext::create_physical_expr`] for a higher-level API
-    /// * [`create_physical_expr`] for a lower-level API
-    ///
-    /// [simplified]: datafusion_optimizer::simplify_expressions
     /// [expr_api]: https://github.com/apache/datafusion/blob/main/datafusion-examples/examples/expr_api.rs
-    /// [`SessionContext::create_physical_expr`]: crate::execution::context::SessionContext::create_physical_expr
     fn create_physical_expr(
         &self,
         expr: Expr,
diff --git a/datafusion/catalog/src/table.rs b/datafusion/catalog/src/table.rs
index 8678bc732a74..1d7bad32c0c1 100644
--- a/datafusion/catalog/src/table.rs
+++ b/datafusion/catalog/src/table.rs
@@ -267,7 +267,7 @@ pub trait TableProvider: Sync + Send {
     /// See [`DataSinkExec`] for the common pattern of inserting a
     /// streams of `RecordBatch`es as files to an ObjectStore.
     ///
-    /// [`DataSinkExec`]: crate::physical_plan::insert::DataSinkExec
+    /// [`DataSinkExec`]: datafusion_physical_plan::insert::DataSinkExec
     async fn insert_into(
         &self,
         _state: &dyn CatalogSession,
diff --git a/datafusion/core/src/datasource/file_format/mod.rs b/datafusion/core/src/datasource/file_format/mod.rs
index 1aa93a106aff..7bdfebcc58ce 100644
--- a/datafusion/core/src/datasource/file_format/mod.rs
+++ b/datafusion/core/src/datasource/file_format/mod.rs
@@ -69,7 +69,7 @@ pub trait FileFormatFactory: Sync + Send + GetExt {
 /// from the [`TableProvider`]. This helps code re-utilization across
 /// providers that support the same file formats.
 ///
-/// [`TableProvider`]: crate::datasource::provider::TableProvider
+/// [`TableProvider`]: crate::catalog_api::TableProvider
 #[async_trait]
 pub trait FileFormat: Send + Sync + fmt::Debug {
     /// Returns the table provider as [`Any`](std::any::Any) so that it can be