diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala
index 4ecbfd123f0d2..bb694a7679890 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala
@@ -22,7 +22,6 @@ import java.util
 
 import scala.collection.JavaConverters._
 
 import org.apache.spark.SparkContext
-import org.apache.spark.annotation.Unstable
 import org.apache.spark.api.plugin.{DriverPlugin, ExecutorPlugin, PluginContext, SparkPlugin}
 import org.apache.spark.sql.connect.service.SparkConnectService
@@ -33,7 +32,6 @@ import org.apache.spark.sql.connect.service.SparkConnectService
  * implement it as a Driver Plugin. To enable Spark Connect, simply make sure that the appropriate
  * JAR is available in the CLASSPATH and the driver plugin is configured to load this class.
  */
-@Unstable
 class SparkConnectPlugin extends SparkPlugin {
 
   /**
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/command/SparkConnectCommandPlanner.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/command/SparkConnectCommandPlanner.scala
index 47d421a0359bf..80c36a4773e6e 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/command/SparkConnectCommandPlanner.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/command/SparkConnectCommandPlanner.scala
@@ -21,7 +21,6 @@ import scala.collection.JavaConverters._
 
 import com.google.common.collect.{Lists, Maps}
 
-import org.apache.spark.annotation.{Since, Unstable}
 import org.apache.spark.api.python.{PythonEvalType, SimplePythonFunction}
 import org.apache.spark.connect.proto
 import org.apache.spark.connect.proto.WriteOperation
@@ -35,8 +34,6 @@ final case class InvalidCommandInput(
     private val cause: Throwable = null)
     extends Exception(message, cause)
 
-@Unstable
-@Since("3.4.0")
 class SparkConnectCommandPlanner(session: SparkSession, command: proto.Command) {
 
   lazy val pythonExec =
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala
index 6ae6dfa1577c6..a9a97e740d84d 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/dsl/package.scala
@@ -26,6 +26,9 @@ import org.apache.spark.sql.connect.planner.DataTypeProtoConverter
 
 /**
  * A collection of implicit conversions that create a DSL for constructing connect protos.
+ *
+ * All classes in connect/dsl are considered an internal API to Spark Connect and are subject to
+ * change between minor releases.
  */
 package object dsl {
 
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
index 9e3899f4a1a03..53abf2e770909 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
@@ -19,7 +19,6 @@ package org.apache.spark.sql.connect.planner
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.annotation.{Since, Unstable}
 import org.apache.spark.connect.proto
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute, UnresolvedFunction, UnresolvedRelation, UnresolvedStar}
@@ -38,8 +37,6 @@ final case class InvalidPlanInput(
     private val cause: Throwable = None.orNull)
     extends Exception(message, cause)
 
-@Unstable
-@Since("3.4.0")
 class SparkConnectPlanner(plan: proto.Relation, session: SparkSession) {
 
   def transform(): LogicalPlan = {
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectService.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectService.scala
index 5841017e5bb71..a1e70975da55e 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectService.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectService.scala
@@ -27,7 +27,6 @@ import io.grpc.protobuf.services.ProtoReflectionService
 import io.grpc.stub.StreamObserver
 
 import org.apache.spark.SparkEnv
-import org.apache.spark.annotation.{Since, Unstable}
 import org.apache.spark.connect.proto
 import org.apache.spark.connect.proto.{AnalyzeResponse, Request, Response, SparkConnectServiceGrpc}
 import org.apache.spark.internal.Logging
@@ -44,8 +43,6 @@ import org.apache.spark.sql.execution.ExtendedMode
  * @param debug
  *   delegates debug behavior to the handlers.
  */
-@Unstable
-@Since("3.4.0")
 class SparkConnectService(debug: Boolean)
     extends SparkConnectServiceGrpc.SparkConnectServiceImplBase
     with Logging {
@@ -127,9 +124,7 @@ class SparkConnectService(debug: Boolean)
  * @param userId
  * @param session
  */
-@Unstable
-@Since("3.4.0")
-private[connect] case class SessionHolder(userId: String, session: SparkSession)
+case class SessionHolder(userId: String, session: SparkSession)
 
 /**
  * Static instance of the SparkConnectService.
@@ -137,8 +132,6 @@ private[connect] case class SessionHolder(userId: String, session: SparkSession)
  * Used to start the overall SparkConnect service and provides global state to manage the
  * different SparkSession from different users connecting to the cluster.
  */
-@Unstable
-@Since("3.4.0")
 object SparkConnectService {
 
   private val CACHE_SIZE = 100
@@ -169,7 +162,7 @@ object SparkConnectService {
   /**
    * Based on the `key` find or create a new SparkSession.
    */
-  private[connect] def getOrCreateIsolatedSession(key: SessionCacheKey): SessionHolder = {
+  def getOrCreateIsolatedSession(key: SessionCacheKey): SessionHolder = {
     userSessionMapping.get(
       key,
       () => {
diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala
index 997b0f6b6d826..a429823c02f8e 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala
@@ -23,7 +23,6 @@ import com.google.protobuf.ByteString
 import io.grpc.stub.StreamObserver
 
 import org.apache.spark.SparkException
-import org.apache.spark.annotation.{Since, Unstable}
 import org.apache.spark.connect.proto
 import org.apache.spark.connect.proto.{Request, Response}
 import org.apache.spark.internal.Logging
@@ -34,8 +33,6 @@ import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, AdaptiveSparkPlanHelper, QueryStageExec}
 import org.apache.spark.sql.internal.SQLConf
 
-@Unstable
-@Since("3.4.0")
 class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) extends Logging {
 
   // The maximum batch size in bytes for a single batch of data to be returned via proto.
@@ -60,7 +57,7 @@ class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) exte
     processRows(request.getClientId, rows)
   }
 
-  private[connect] def processRows(clientId: String, rows: DataFrame): Unit = {
+  def processRows(clientId: String, rows: DataFrame): Unit = {
     val timeZoneId = SQLConf.get.sessionLocalTimeZone
 
     // Only process up to 10MB of data.
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 33883a2efaa51..20c537e0e672f 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -1177,6 +1177,7 @@ object Unidoc {
       .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/collection")))
      .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/kvstore")))
      .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst")))
+      .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/connect")))
       .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution")))
       .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal")))
       .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive")))
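As the SparkConnectPlugin scaladoc in this patch notes, Spark Connect is enabled by placing the connect JAR on the CLASSPATH and configuring the driver plugin to load the class. A minimal sketch of what that could look like, assuming Spark's standard spark.plugins mechanism for SparkPlugin implementations; the builder settings below are illustrative and are not part of this patch:

import org.apache.spark.sql.SparkSession

// Sketch only: spark.plugins is Spark's generic entry point for SparkPlugin
// implementations; the connect JAR is assumed to already be on the driver classpath.
val spark = SparkSession
  .builder()
  .master("local[*]") // illustrative master; other deployment modes are configured the same way
  .config("spark.plugins", "org.apache.spark.sql.connect.SparkConnectPlugin")
  .getOrCreate()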