docs/sql-keywords.md (1 addition, 0 deletions)
@@ -179,6 +179,7 @@ Below is a list of all the keywords in Spark SQL.
<tr><td>MONTH</td><td>reserved</td><td>non-reserved</td><td>reserved</td></tr>
<tr><td>MONTHS</td><td>non-reserved</td><td>non-reserved</td><td>non-reserved</td></tr>
<tr><td>MSCK</td><td>non-reserved</td><td>non-reserved</td><td>non-reserved</td></tr>
<tr><td>NAMESPACES</td><td>non-reserved</td><td>non-reserved</td><td>non-reserved</td></tr>
[Contributor] cc @xianyinxin, we should also add DELETE and UPDATE. Can you open a PR to do it?

[Reply] OK, will open a PR.

[Reply] DELETE is already there. UPDATE is included in #25626.

<tr><td>NATURAL</td><td>reserved</td><td>strict-non-reserved</td><td>reserved</td></tr>
<tr><td>NO</td><td>non-reserved</td><td>non-reserved</td><td>reserved</td></tr>
<tr><td>NOT</td><td>reserved</td><td>non-reserved</td><td>reserved</td></tr>
@@ -92,6 +92,8 @@ statement
| DROP database (IF EXISTS)? db=errorCapturingIdentifier
(RESTRICT | CASCADE)? #dropDatabase
| SHOW DATABASES (LIKE? pattern=STRING)? #showDatabases
| SHOW NAMESPACES ((FROM | IN) multipartIdentifier)?
[Contributor Author] I put both FROM and IN, similar to SHOW TABLES. Please let me know if FROM is not needed. (Both accepted forms are sketched at the end of this hunk.)

(LIKE? pattern=STRING)? #showNamespaces
| createTableHeader ('(' colTypeList ')')? tableProvider
((OPTIONS options=tablePropertyList) |
(PARTITIONED BY partitioning=transformList) |
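A rough usage sketch of what the new rule accepts, assuming a configured v2 catalog named `testcat` and a namespace `ns1` (illustrative names, not from this PR):

```scala
// Illustrative only; "testcat" and "ns1" are assumed names.
spark.sql("SHOW NAMESPACES")                            // namespaces in the default catalog
spark.sql("SHOW NAMESPACES FROM testcat.ns1")           // FROM and IN are interchangeable
spark.sql("SHOW NAMESPACES IN testcat.ns1 LIKE 'dev*'") // optional LIKE pattern filter
```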
@@ -1006,6 +1008,7 @@ ansiNonReserved
| MINUTES
| MONTHS
| MSCK
| NAMESPACES
| NO
| NULLS
| OF
@@ -1255,6 +1258,7 @@ nonReserved
| MONTH
| MONTHS
| MSCK
| NAMESPACES
| NO
| NOT
| NULL
@@ -1515,6 +1519,7 @@ MINUTES: 'MINUTES';
MONTH: 'MONTH';
MONTHS: 'MONTHS';
MSCK: 'MSCK';
NAMESPACES: 'NAMESPACES';
NATURAL: 'NATURAL';
NO: 'NO';
NOT: 'NOT' | '!';
@@ -68,6 +68,14 @@ object CatalogV2Implicits {
case _ =>
throw new AnalysisException(s"Cannot use catalog ${plugin.name}: not a TableCatalog")
}

def asNamespaceCatalog: SupportsNamespaces = plugin match {
case namespaceCatalog: SupportsNamespaces =>
namespaceCatalog
case _ =>
throw new AnalysisException(
s"Cannot use catalog ${plugin.name}: does not support namespaces")
}
}
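A minimal usage sketch of the new conversion, assuming an already-resolved `plugin: CatalogPlugin` (the helper function name is illustrative):

```scala
import org.apache.spark.sql.catalog.v2.{CatalogPlugin, SupportsNamespaces}
import org.apache.spark.sql.catalog.v2.CatalogV2Implicits._

// Minimal sketch: convert a resolved plugin and list its top-level namespaces.
def listRoots(plugin: CatalogPlugin): Array[Array[String]] = {
  // Throws AnalysisException when the plugin does not implement SupportsNamespaces.
  val nsCatalog: SupportsNamespaces = plugin.asNamespaceCatalog
  nsCatalog.listNamespaces()
}
```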

implicit class NamespaceHelper(namespace: Array[String]) {
@@ -38,7 +38,7 @@ import org.apache.spark.sql.catalyst.expressions.aggregate.{First, Last}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.logical.sql.{AlterTableAddColumnsStatement, AlterTableAlterColumnStatement, AlterTableDropColumnsStatement, AlterTableRenameColumnStatement, AlterTableSetLocationStatement, AlterTableSetPropertiesStatement, AlterTableUnsetPropertiesStatement, AlterViewSetPropertiesStatement, AlterViewUnsetPropertiesStatement, CreateTableAsSelectStatement, CreateTableStatement, DeleteFromStatement, DescribeColumnStatement, DescribeTableStatement, DropTableStatement, DropViewStatement, InsertIntoStatement, QualifiedColType, ReplaceTableAsSelectStatement, ReplaceTableStatement, ShowTablesStatement}
import org.apache.spark.sql.catalyst.plans.logical.sql.{AlterTableAddColumnsStatement, AlterTableAlterColumnStatement, AlterTableDropColumnsStatement, AlterTableRenameColumnStatement, AlterTableSetLocationStatement, AlterTableSetPropertiesStatement, AlterTableUnsetPropertiesStatement, AlterViewSetPropertiesStatement, AlterViewUnsetPropertiesStatement, CreateTableAsSelectStatement, CreateTableStatement, DeleteFromStatement, DescribeColumnStatement, DescribeTableStatement, DropTableStatement, DropViewStatement, InsertIntoStatement, QualifiedColType, ReplaceTableAsSelectStatement, ReplaceTableStatement, ShowNamespacesStatement, ShowTablesStatement}
import org.apache.spark.sql.catalyst.util.DateTimeUtils.{getZoneId, stringToDate, stringToTimestamp}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
@@ -2260,6 +2260,15 @@ class AstBuilder(conf: SQLConf) extends SqlBaseBaseVisitor[AnyRef] with Logging
}
}

/**
* Create a [[ShowNamespacesStatement]] command.
*/
override def visitShowNamespaces(ctx: ShowNamespacesContext): LogicalPlan = withOrigin(ctx) {
ShowNamespacesStatement(
Option(ctx.multipartIdentifier).map(visitMultipartIdentifier),
Option(ctx.pattern).map(string))
}
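// Sketch of the mapping (names illustrative): "SHOW NAMESPACES IN a.b LIKE 'x*'"
// parses to ShowNamespacesStatement(Some(Seq("a", "b")), Some("x*")).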

/**
* Create a table, returning a [[CreateTableStatement]] logical plan.
*
@@ -17,7 +17,7 @@

package org.apache.spark.sql.catalyst.plans.logical

import org.apache.spark.sql.catalog.v2.{Identifier, TableCatalog, TableChange}
import org.apache.spark.sql.catalog.v2.{Identifier, SupportsNamespaces, TableCatalog, TableChange}
import org.apache.spark.sql.catalog.v2.TableChange.{AddColumn, ColumnChange}
import org.apache.spark.sql.catalog.v2.expressions.Transform
import org.apache.spark.sql.catalyst.AliasIdentifier
@@ -560,6 +560,17 @@ object OverwritePartitionsDynamic {
}
}

/**
* The logical plan of the SHOW NAMESPACES command that works for v2 catalogs.
*/
case class ShowNamespaces(
catalog: SupportsNamespaces,
namespace: Option[Seq[String]],
[Contributor] After reading the code, it's actually catalogAndNamespace, right?

[Contributor Author, @imback82, Sep 4, 2019] This is just namespace, since the catalog is already resolved in DataSourceResolution.scala.

pattern: Option[String]) extends Command {
override val output: Seq[Attribute] = Seq(
AttributeReference("namespace", StringType, nullable = false)())
}
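A hedged sketch of what the command produces at runtime, assuming a session configured with a v2 catalog named `testcat`:

```scala
val df = spark.sql("SHOW NAMESPACES IN testcat")
df.printSchema() // root
                 //  |-- namespace: string (nullable = false)
df.show()        // one row per namespace name
```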

case class DescribeTable(table: NamedRelation, isExtended: Boolean) extends Command {

override def children: Seq[LogicalPlan] = Seq(table)
@@ -0,0 +1,24 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalyst.plans.logical.sql

/**
* A SHOW NAMESPACES statement, as parsed from SQL.
*/
case class ShowNamespacesStatement(namespace: Option[Seq[String]], pattern: Option[String])
[Contributor] Ditto: isn't this actually catalogAndNamespace?

[Contributor Author] Yes, this is catalog + namespace, but I followed the same convention as other statements; e.g., CreateTableStatement has tableName instead of catalogAndTableName. Please let me know if you prefer catalogAndNamespace here.

extends ParsedStatement
@@ -24,7 +24,7 @@ import org.apache.spark.sql.catalog.v2.expressions.{ApplyTransform, BucketTransf
import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedRelation, UnresolvedStar}
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
import org.apache.spark.sql.catalyst.plans.logical.sql.{AlterTableAddColumnsStatement, AlterTableAlterColumnStatement, AlterTableDropColumnsStatement, AlterTableRenameColumnStatement, AlterTableSetLocationStatement, AlterTableSetPropertiesStatement, AlterTableUnsetPropertiesStatement, AlterViewSetPropertiesStatement, AlterViewUnsetPropertiesStatement, CreateTableAsSelectStatement, CreateTableStatement, DescribeColumnStatement, DescribeTableStatement, DropTableStatement, DropViewStatement, InsertIntoStatement, QualifiedColType, ReplaceTableAsSelectStatement, ReplaceTableStatement, ShowTablesStatement}
import org.apache.spark.sql.catalyst.plans.logical.sql.{AlterTableAddColumnsStatement, AlterTableAlterColumnStatement, AlterTableDropColumnsStatement, AlterTableRenameColumnStatement, AlterTableSetLocationStatement, AlterTableSetPropertiesStatement, AlterTableUnsetPropertiesStatement, AlterViewSetPropertiesStatement, AlterViewUnsetPropertiesStatement, CreateTableAsSelectStatement, CreateTableStatement, DescribeColumnStatement, DescribeTableStatement, DropTableStatement, DropViewStatement, InsertIntoStatement, QualifiedColType, ReplaceTableAsSelectStatement, ReplaceTableStatement, ShowNamespacesStatement, ShowTablesStatement}
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType, TimestampType}
import org.apache.spark.unsafe.types.UTF8String

@@ -779,6 +779,21 @@ class DDLParserSuite extends AnalysisTest {
ShowTablesStatement(Some(Seq("tbl")), Some("*dog*")))
}

test("show namespaces") {
[Contributor] cc @xianyinxin, can you add similar parser tests for DELETE/UPDATE as well?

[Reply] For UPDATE, #25626 has added some parser cases. For DELETE, it will be done in #25652.

comparePlans(
parsePlan("SHOW NAMESPACES"),
ShowNamespacesStatement(None, None))
comparePlans(
parsePlan("SHOW NAMESPACES FROM testcat.ns1.ns2"),
ShowNamespacesStatement(Some(Seq("testcat", "ns1", "ns2")), None))
comparePlans(
parsePlan("SHOW NAMESPACES IN testcat.ns1.ns2"),
ShowNamespacesStatement(Some(Seq("testcat", "ns1", "ns2")), None))
comparePlans(
parsePlan("SHOW NAMESPACES IN testcat.ns1 LIKE '*pattern*'"),
ShowNamespacesStatement(Some(Seq("testcat", "ns1")), Some("*pattern*")))
}

private case class TableSpec(
name: Seq[String],
schema: Option[StructType],
@@ -30,12 +30,9 @@ import org.apache.spark.sql.sources.v2.Table
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap

class InMemoryTableCatalog extends TableCatalog with SupportsNamespaces {
class BasicInMemoryTableCatalog extends TableCatalog {
import org.apache.spark.sql.catalog.v2.CatalogV2Implicits._

protected val namespaces: util.Map[List[String], Map[String, String]] =
new ConcurrentHashMap[List[String], Map[String, String]]()

protected val tables: util.Map[Identifier, InMemoryTable] =
new ConcurrentHashMap[Identifier, InMemoryTable]()

@@ -112,6 +109,13 @@ class InMemoryTableCatalog extends TableCatalog with SupportsNamespaces {
def clearTables(): Unit = {
tables.clear()
}
}

class InMemoryTableCatalog extends BasicInMemoryTableCatalog with SupportsNamespaces {
import org.apache.spark.sql.catalog.v2.CatalogV2Implicits._

protected val namespaces: util.Map[List[String], Map[String, String]] =
new ConcurrentHashMap[List[String], Map[String, String]]()

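// A namespace exists here if it was created explicitly or is implied by the
// identifier of an existing table (see allNamespaces below).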
private def allNamespaces: Seq[Seq[String]] = {
(tables.keySet.asScala.map(_.namespace.toSeq) ++ namespaces.keySet.asScala).toSeq.distinct
@@ -20,15 +20,15 @@ package org.apache.spark.sql.execution.datasources
import scala.collection.mutable

import org.apache.spark.sql.{AnalysisException, SaveMode}
import org.apache.spark.sql.catalog.v2.{CatalogManager, Identifier, LookupCatalog, TableCatalog}
import org.apache.spark.sql.catalog.v2.{CatalogManager, Identifier, LookupCatalog, SupportsNamespaces, TableCatalog}
import org.apache.spark.sql.catalog.v2.expressions.Transform
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{CastSupport, UnresolvedRelation}
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogTable, CatalogTableType, CatalogUtils, UnresolvedCatalogRelation}
import org.apache.spark.sql.catalyst.plans.logical.{CreateTableAsSelect, CreateV2Table, DeleteFromTable, DropTable, Filter, LogicalPlan, ReplaceTable, ReplaceTableAsSelect, ShowTables, SubqueryAlias}
import org.apache.spark.sql.catalyst.plans.logical.sql.{AlterTableAddColumnsStatement, AlterTableSetLocationStatement, AlterTableSetPropertiesStatement, AlterTableUnsetPropertiesStatement, AlterViewSetPropertiesStatement, AlterViewUnsetPropertiesStatement, CreateTableAsSelectStatement, CreateTableStatement, DeleteFromStatement, DescribeColumnStatement, DescribeTableStatement, DropTableStatement, DropViewStatement, QualifiedColType, ReplaceTableAsSelectStatement, ReplaceTableStatement, ShowTablesStatement}
import org.apache.spark.sql.catalyst.plans.logical.{CreateTableAsSelect, CreateV2Table, DeleteFromTable, DropTable, Filter, LogicalPlan, ReplaceTable, ReplaceTableAsSelect, ShowNamespaces, ShowTables, SubqueryAlias}
import org.apache.spark.sql.catalyst.plans.logical.sql.{AlterTableAddColumnsStatement, AlterTableSetLocationStatement, AlterTableSetPropertiesStatement, AlterTableUnsetPropertiesStatement, AlterViewSetPropertiesStatement, AlterViewUnsetPropertiesStatement, CreateTableAsSelectStatement, CreateTableStatement, DeleteFromStatement, DescribeColumnStatement, DescribeTableStatement, DropTableStatement, DropViewStatement, QualifiedColType, ReplaceTableAsSelectStatement, ReplaceTableStatement, ShowNamespacesStatement, ShowTablesStatement}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.command.{AlterTableAddColumnsCommand, AlterTableSetLocationCommand, AlterTableSetPropertiesCommand, AlterTableUnsetPropertiesCommand, DescribeColumnCommand, DescribeTableCommand, DropTableCommand, ShowTablesCommand}
import org.apache.spark.sql.execution.command.{AlterTableAddColumnsCommand, AlterTableSetLocationCommand, AlterTableSetPropertiesCommand, AlterTableUnsetPropertiesCommand, DescribeColumnCommand, DescribeTableCommand, DropTableCommand, ShowDatabasesCommand, ShowTablesCommand}
import org.apache.spark.sql.execution.datasources.v2.FileDataSourceV2
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{HIVE_TYPE_STRING, HiveStringType, MetadataBuilder, StructField, StructType}
@@ -169,6 +169,24 @@ case class DataSourceResolution(
val aliased = delete.tableAlias.map(SubqueryAlias(_, relation)).getOrElse(relation)
DeleteFromTable(aliased, delete.condition)

case ShowNamespacesStatement(None, pattern) =>
defaultCatalog match {
[Contributor] I think this should be currentCatalog instead. @cloud-fan, do you agree?

[Contributor] Yea.

[Contributor] Let's implement switching the current catalog first; otherwise we are not able to test it.

[Contributor] @imback82, are you working on it?

[Contributor Author] Yes, I am working on USE NAMESPACE.

[Contributor Author] I should be able to send out the PR sometime tomorrow.

case Some(catalog) =>
ShowNamespaces(catalog.asNamespaceCatalog, None, pattern)
case None =>
throw new AnalysisException("No default v2 catalog is set.")
}

case ShowNamespacesStatement(Some(namespace), pattern) =>
val CatalogNamespace(maybeCatalog, ns) = namespace
maybeCatalog match {
case Some(catalog) =>
ShowNamespaces(catalog.asNamespaceCatalog, Some(ns), pattern)
case None =>
throw new AnalysisException(
[Contributor] I think this needs to distinguish between the case where the catalog is None and the case where the catalog does not support namespaces. For the second case, this should report that the catalog doesn't support namespaces. You can also add a conversion method, asNamespaceCatalog, to CatalogV2Utils like asTableCatalog.

[Contributor Author] Using asNamespaceCatalog simplifies the matching. Thanks.

[Contributor] Why not use the current catalog instead of failing?

[Contributor] If the catalog name is specified but the catalog doesn't support namespaces, I think we should fail instead of falling back to the current catalog. It's similar to: if the catalog name is specified but doesn't contain the table we need, we should fail instead of falling back to the current catalog.

s"No v2 catalog is available for ${namespace.quoted}")
}
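// Sketch of how the two cases above resolve (names assumed, not from this PR):
//   SHOW NAMESPACES                -> ShowNamespaces(defaultCatalog.asNamespaceCatalog, None, pattern),
//                                     or AnalysisException if no default v2 catalog is set
//   SHOW NAMESPACES IN testcat.ns1 -> ShowNamespaces(testcat.asNamespaceCatalog, Some(Seq("ns1")), pattern)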

case ShowTablesStatement(None, pattern) =>
defaultCatalog match {
case Some(catalog) =>
@@ -26,7 +26,7 @@ import org.apache.spark.sql.{AnalysisException, Strategy}
import org.apache.spark.sql.catalog.v2.StagingTableCatalog
import org.apache.spark.sql.catalyst.expressions.{And, AttributeReference, AttributeSet, Expression, PredicateHelper, SubqueryExpression}
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{AlterTable, AppendData, CreateTableAsSelect, CreateV2Table, DeleteFromTable, DescribeTable, DropTable, LogicalPlan, OverwriteByExpression, OverwritePartitionsDynamic, Repartition, ReplaceTable, ReplaceTableAsSelect, ShowTables}
import org.apache.spark.sql.catalyst.plans.logical.{AlterTable, AppendData, CreateTableAsSelect, CreateV2Table, DeleteFromTable, DescribeTable, DropTable, LogicalPlan, OverwriteByExpression, OverwritePartitionsDynamic, Repartition, ReplaceTable, ReplaceTableAsSelect, ShowNamespaces, ShowTables}
import org.apache.spark.sql.execution.{FilterExec, ProjectExec, SparkPlan}
import org.apache.spark.sql.execution.datasources.DataSourceStrategy
import org.apache.spark.sql.execution.streaming.continuous.{ContinuousCoalesceExec, WriteToContinuousDataSource, WriteToContinuousDataSourceExec}
@@ -278,6 +278,9 @@ object DataSourceV2Strategy extends Strategy with PredicateHelper {
case AlterTable(catalog, ident, _, changes) =>
AlterTableExec(catalog, ident, changes) :: Nil

case r: ShowNamespaces =>
ShowNamespacesExec(r.output, r.catalog, r.namespace, r.pattern) :: Nil
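// Note: the physical node reuses the logical plan's output attributes (r.output),
// mirroring the ShowTables case below.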

case r : ShowTables =>
ShowTablesExec(r.output, r.catalog, r.namespace, r.pattern) :: Nil

@@ -0,0 +1,63 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.execution.datasources.v2

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalog.v2.CatalogV2Implicits.NamespaceHelper
import org.apache.spark.sql.catalog.v2.SupportsNamespaces
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.{Attribute, GenericRowWithSchema}
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.execution.LeafExecNode

/**
* Physical plan node for showing namespaces.
*/
case class ShowNamespacesExec(
output: Seq[Attribute],
catalog: SupportsNamespaces,
namespace: Option[Seq[String]],
pattern: Option[String])
extends LeafExecNode {
override protected def doExecute(): RDD[InternalRow] = {
val namespaces = namespace.map { ns =>
if (ns.nonEmpty) {
[Contributor Author] @rdblue @cloud-fan, this is for handling the case SHOW NAMESPACES IN catalogname. In this case, should we list the root namespaces or call listNamespaces with an empty array?

[Contributor] > should we list the root namespaces or call listNamespaces with an empty array?

I think these two are the same?

[Contributor Author] From the SPIP, I see the following:

    SHOW NAMESPACES IN foo
        Returns the result of sparkSession.catalog("foo").listNamespaces().

Since the behavior of listNamespaces(Array()) depends on the implementation, I think it's safe to check and call listNamespaces(). @rdblue, what do you think?

[Contributor, @rdblue, Sep 4, 2019] Calling listNamespaces() sounds good to me.

catalog.listNamespaces(ns.toArray)
} else {
catalog.listNamespaces()
}
}
.getOrElse(catalog.listNamespaces())

val rows = new ArrayBuffer[InternalRow]()
val encoder = RowEncoder(schema).resolveAndBind()

namespaces.map(_.quoted).foreach { ns =>
if (pattern.map(StringUtils.filterPattern(Seq(ns), _).nonEmpty).getOrElse(true)) {
rows += encoder
.toRow(new GenericRowWithSchema(Array(ns), schema))
.copy()
}
}

sparkContext.parallelize(rows, 1)
}
}
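A hedged sketch of the LIKE filtering used above: StringUtils.filterPattern follows SHOW TABLES semantics, where '*' matches any sequence of characters and '|' separates alternatives (the names below are illustrative):

```scala
import org.apache.spark.sql.catalyst.util.StringUtils

val names = Seq("ns1", "ns2", "dev_a", "dev_b")
// Keep a name when the pattern matches it; "dev*|ns1" matches dev_a, dev_b, and ns1.
val kept = names.filter(n => StringUtils.filterPattern(Seq(n), "dev*|ns1").nonEmpty)
// kept == Seq("ns1", "dev_a", "dev_b")
```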