@@ -33,6 +33,142 @@ import com.typesafe.tools.mima.core.ProblemFilters._
 */
 object MimaExcludes {
   def excludes(version: String) = version match {
+    case v if v.startsWith("2.0") =>
+      // When 1.6 is officially released, update this exclusion list.
+      Seq(
+        MimaBuild.excludeSparkPackage("deploy"),
+        MimaBuild.excludeSparkPackage("network"),
+        MimaBuild.excludeSparkPackage("unsafe"),
+        // These are needed if checking against the sbt build, since they are part of
+        // the maven-generated artifacts in 1.3.
+        excludePackage("org.spark-project.jetty"),
+        MimaBuild.excludeSparkPackage("unused"),
+        // SQL execution is considered private.
+        excludePackage("org.apache.spark.sql.execution"),
+        // SQL columnar is considered private.
+        excludePackage("org.apache.spark.sql.columnar"),
+        // The shuffle package is considered private.
+        excludePackage("org.apache.spark.shuffle"),
+        // The collections utilities are considered private.
+        excludePackage("org.apache.spark.util.collection")
+      ) ++
+      MimaBuild.excludeSparkClass("streaming.flume.FlumeTestUtils") ++
+      MimaBuild.excludeSparkClass("streaming.flume.PollingFlumeTestUtils") ++
+      Seq(
+        // MiMa does not deal properly with sealed traits
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.ml.classification.LogisticRegressionSummary.featuresCol")
+      ) ++ Seq(
+        // SPARK-11530
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.feature.PCAModel.this")
+      ) ++ Seq(
+        // SPARK-10381 Fix types / units in private AskPermissionToCommitOutput RPC message.
+        // This class is marked as `private` but MiMa still seems to be confused by the change.
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.task"),
+        ProblemFilters.exclude[IncompatibleResultTypeProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.copy$default$2"),
+        ProblemFilters.exclude[IncompatibleMethTypeProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.copy"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.taskAttempt"),
+        ProblemFilters.exclude[IncompatibleResultTypeProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.copy$default$3"),
+        ProblemFilters.exclude[IncompatibleMethTypeProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.this"),
+        ProblemFilters.exclude[IncompatibleMethTypeProblem](
+          "org.apache.spark.scheduler.AskPermissionToCommitOutput.apply")
+      ) ++ Seq(
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.shuffle.FileShuffleBlockResolver$ShuffleFileGroup")
+      ) ++ Seq(
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.ml.regression.LeastSquaresAggregator.add"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.ml.regression.LeastSquaresCostFun.this"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.clearLastInstantiatedContext"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.setLastInstantiatedContext"),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.sql.SQLContext$SQLSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.detachSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.tlSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.defaultSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.currentSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.openSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.setSession"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.sql.SQLContext.createSession")
+      ) ++ Seq(
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.SparkContext.preferredNodeLocationData_="),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.rdd.MapPartitionsWithPreparationRDD"),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.rdd.MapPartitionsWithPreparationRDD$"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SparkSQLParser")
+      ) ++ Seq(
+        // SPARK-11485
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.DataFrameHolder.df"),
+        // SPARK-11541 mark various JDBC dialects as private
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productElement"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productArity"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.canEqual"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productIterator"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.productPrefix"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.toString"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.NoopDialect.hashCode"),
+        ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.jdbc.PostgresDialect$"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productElement"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productArity"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.canEqual"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productIterator"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.productPrefix"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.toString"),
+        ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.jdbc.PostgresDialect.hashCode"),
+        ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.jdbc.NoopDialect$")
+      ) ++ Seq(
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.status.api.v1.ApplicationInfo.this"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.status.api.v1.StageData.this")
+      ) ++ Seq(
+        // SPARK-11766 add toJson to Vector
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.mllib.linalg.Vector.toJson")
+      ) ++ Seq(
+        // SPARK-9065 Support message handler in Kafka Python API
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper.createDirectStream"),
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper.createRDD")
+      ) ++ Seq(
+        // SPARK-4557 Changed foreachRDD to use VoidFunction
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.streaming.api.java.JavaDStreamLike.foreachRDD")
+      ) ++ Seq(
+        // SPARK-11996 Make the executor thread dump work again
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.ExecutorEndpoint"),
+        ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.executor.ExecutorEndpoint$"),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.storage.BlockManagerMessages$GetRpcHostPortForExecutor"),
+        ProblemFilters.exclude[MissingClassProblem](
+          "org.apache.spark.storage.BlockManagerMessages$GetRpcHostPortForExecutor$")
+      ) ++ Seq(
+        // SPARK-3580 Add getNumPartitions method to JavaRDD
+        ProblemFilters.exclude[MissingMethodProblem](
+          "org.apache.spark.api.java.JavaRDDLike.getNumPartitions")
+      ) ++
+      // SPARK-11314: YARN backend moved to yarn sub-module and MiMa complains even though it's a
+      // private class.
+      MimaBuild.excludeSparkClass("scheduler.cluster.YarnSchedulerBackend$YarnSchedulerEndpoint")
     case v if v.startsWith("1.6") =>
       Seq(
         MimaBuild.excludeSparkPackage("deploy"),
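
For context on how a list like this is consumed: each entry is a ProblemFilter that suppresses one reviewed, intentional binary break when MiMa diffs the freshly built jars against the previous release. Below is a minimal sketch of the sbt wiring, not part of this diff. The setting names (mimaPreviousArtifacts, mimaBinaryIssueFilters) come from the current sbt-mima-plugin, and the "1.5.0" baseline is illustrative; Spark's actual wiring lives in project/MimaBuild.scala and may differ.

// build.sbt sketch (assumes sbt-mima-plugin is enabled; baseline version is illustrative)
import com.typesafe.tools.mima.plugin.MimaKeys._

lazy val core = (project in file("core")).settings(
  // Published artifact to diff the compiled classes against.
  mimaPreviousArtifacts := Set("org.apache.spark" %% "spark-core" % "1.5.0"),
  // Suppress the reviewed breakages collected in MimaExcludes for this version.
  mimaBinaryIssueFilters ++= MimaExcludes.excludes(version.value)
)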