@@ -160,15 +160,15 @@ object IsotonicRegressionModel extends Loader[IsotonicRegressionModel] {
         predictions: Array[Double],
         isotonic: Boolean): Unit = {
       val sqlContext = new SQLContext(sc)
-      import sqlContext.implicits._
 
       val metadata = compact(render(
         ("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~
           ("isotonic" -> isotonic)))
       sc.parallelize(Seq(metadata), 1).saveAsTextFile(metadataPath(path))
 
-      sqlContext.createDataFrame(boundaries.toSeq.zip(predictions)
-        .map { case (b, p) => Data(b, p) }).saveAsParquetFile(dataPath(path))
+      sqlContext.createDataFrame(
+        boundaries.toSeq.zip(predictions).map { case (b, p) => Data(b, p) }
+      ).saveAsParquetFile(dataPath(path))
     }
 
     def load(sc: SparkContext, path: String): (Array[Double], Array[Double]) = {
@@ -177,8 +177,9 @@ object IsotonicRegressionModel extends Loader[IsotonicRegressionModel] {
 
       checkSchema[Data](dataRDD.schema)
       val dataArray = dataRDD.select("boundary", "prediction").collect()
-      val (boundaries, predictions) = dataArray.map {
-        x => (x.getDouble(0), x.getDouble(1)) }.toList.sortBy(_._1).unzip
+      val (boundaries, predictions) = dataArray.map { x =>
+        (x.getDouble(0), x.getDouble(1))
+      }.toList.sortBy(_._1).unzip
       (boundaries.toArray, predictions.toArray)
     }
   }