@@ -17,14 +17,14 @@
 package org.apache.spark.sql.jdbc
 
 import java.io.File
+import java.nio.file.Files
 import java.sql.ResultSet
 
 import scala.collection.mutable.ArrayBuffer
 import scala.util.control.NonFatal
 
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.SQLQueryTestHelper
-import org.apache.spark.sql.catalyst.util.fileToString
 
 /**
  * This suite builds off of that to allow us to run other DBMS against the SQL test golden files (on
@@ -76,7 +76,7 @@ trait CrossDbmsQueryTestSuite extends DockerJDBCIntegrationSuite with SQLQueryTestHelper
   }
 
   protected def runSqlTestCase(testCase: TestCase, listTestCases: Seq[TestCase]): Unit = {
-    val input = fileToString(new File(testCase.inputFile))
+    val input = Files.readString(new File(testCase.inputFile).toPath)
     val (comments, code) = splitCommentsAndCodes(input)
     val queries = getQueries(code, comments, listTestCases)
 
@@ -143,7 +143,7 @@ trait CrossDbmsQueryTestSuite extends DockerJDBCIntegrationSuite with SQLQueryTestHelper
     // Read back the golden files.
     var curSegment = 0
     val expectedOutputs: Seq[QueryTestOutput] = {
-      val goldenOutput = fileToString(new File(testCase.resultFile))
+      val goldenOutput = Files.readString(new File(testCase.resultFile).toPath)
       val segments = goldenOutput.split("-- !query.*\n")
       outputs.map { output =>
         val result =
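Aside: every golden-file read touched in this PR follows the two-step pattern seen in the hunk above: read the whole result file into a string with Files.readString, then split it into per-query segments on the "-- !query" marker lines. A minimal self-contained sketch of that pattern; the file name and contents are illustrative, not taken from the PR:

import java.nio.file.{Files, Path}

object GoldenFileSplitSketch {
  def main(args: Array[String]): Unit = {
    val golden: Path = Files.createTempFile("demo", ".sql.out")
    // Golden files interleave "-- !query ..." marker lines with payload blocks.
    Files.writeString(golden,
      "-- !query\nSELECT 1\n-- !query schema\nstruct<1:int>\n-- !query output\n1\n")

    // Same call and regex as the suites. "." does not match "\n", so the
    // pattern consumes exactly one whole marker line per split point.
    val segments = Files.readString(golden).split("-- !query.*\n")

    // segments(0) is the prefix before the first marker (the suites count
    // it as the header segment); the payload segments follow in order.
    assert(segments.drop(1).map(_.trim).toSeq == Seq("SELECT 1", "struct<1:int>", "1"))
    Files.delete(golden)
  }
}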

@@ -18,7 +18,6 @@
 package org.apache.spark.sql.catalyst
 
 import java.io._
-import java.nio.charset.Charset
 import java.nio.charset.StandardCharsets.UTF_8
 
 import com.google.common.io.ByteStreams
@@ -48,15 +47,6 @@ package object util extends Logging {
     }
   }
 
-  def fileToString(file: File, encoding: Charset = UTF_8): String = {
-    val inStream = new FileInputStream(file)
-    try {
-      new String(ByteStreams.toByteArray(inStream), encoding)
-    } finally {
-      inStream.close()
-    }
-  }
-
   def resourceToBytes(
       resource: String,
       classLoader: ClassLoader = Utils.getSparkClassLoader): Array[Byte] = {
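Aside: the deleted helper read the whole file through Guava's ByteStreams and decoded it with a caller-supplied charset defaulting to UTF-8; Files.readString (JDK 11+) covers the UTF-8 case in one call. One behavioral nuance, sketched below under the assumption that no call site depends on it: new String(bytes, UTF_8) silently substitutes U+FFFD for malformed byte sequences, while Files.readString throws java.nio.charset.MalformedInputException instead.

import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Path}

object ReadStringSketch {
  def main(args: Array[String]): Unit = {
    val path: Path = Files.createTempFile("golden", ".sql")
    Files.write(path, "SELECT 1;\n".getBytes(UTF_8))

    // What the removed fileToString(file) did: read all bytes and decode
    // as UTF-8, with malformed sequences replaced by U+FFFD.
    val viaBytes = new String(Files.readAllBytes(path), UTF_8)

    // The replacement: also UTF-8, but it throws MalformedInputException
    // on invalid input rather than substituting a replacement character.
    val viaReadString = Files.readString(path)

    assert(viaBytes == viaReadString) // identical for well-formed UTF-8
    Files.delete(path)
  }
}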

@@ -25,7 +25,6 @@ import scala.jdk.CollectionConverters._
 
 import org.apache.spark.SparkFunSuite
 import org.apache.spark.sql.catalyst.plans.SQLHelper
-import org.apache.spark.sql.catalyst.util.fileToString
 
 trait SQLKeywordUtils extends SparkFunSuite with SQLHelper {
 
@@ -38,7 +37,8 @@ trait SQLKeywordUtils extends SparkFunSuite with SQLHelper {
     getWorkspaceFilePath("sql", "api", "src", "main", "antlr4", "org",
       "apache", "spark", "sql", "catalyst", "parser", "SqlBaseLexer.g4").toFile
 
-    (fileToString(sqlBaseParserPath) + fileToString(sqlBaseLexerPath)).split("\n")
+    (Files.readString(sqlBaseParserPath.toPath) +
+      Files.readString(sqlBaseLexerPath.toPath)).split("\n")
   }
 
   // each element is an array of 4 string: the keyword name, reserve or not in Spark ANSI mode,
@@ -47,7 +47,7 @@ trait SQLKeywordUtils extends SparkFunSuite with SQLHelper {
     val docPath = {
       getWorkspaceFilePath("docs", "sql-ref-ansi-compliance.md").toFile
     }
-    fileToString(docPath).split("\n")
+    Files.readString(docPath.toPath).split("\n")
       .dropWhile(!_.startsWith("|Keyword|")).drop(2).takeWhile(_.startsWith("|"))
       .map(_.stripPrefix("|").split("\\|").map(_.trim))
   }

@@ -18,10 +18,11 @@
 package org.apache.spark.sql
 
 import java.io.File
+import java.nio.file.Files
 
 import scala.collection.mutable.ArrayBuffer
 
-import org.apache.spark.sql.catalyst.util.{fileToString, stringToFile}
+import org.apache.spark.sql.catalyst.util.stringToFile
 import org.apache.spark.sql.test.SharedSparkSession
 import org.apache.spark.tags.ExtendedSQLTest
 import org.apache.spark.util.Utils
@@ -165,7 +166,7 @@ class ExpressionsSchemaSuite extends QueryTest with SharedSparkSession {
     val outputSize = outputs.size
     val headerSize = header.size
     val expectedOutputs = {
-      val expectedGoldenOutput = fileToString(resultFile)
+      val expectedGoldenOutput = Files.readString(resultFile.toPath)
       val lines = expectedGoldenOutput.split("\n")
       val expectedSize = lines.size
 

@@ -17,8 +17,10 @@
 
 package org.apache.spark.sql
 
+import java.nio.file.Files
+
 import org.apache.spark.SparkFunSuite
-import org.apache.spark.sql.catalyst.util.{fileToString, stringToFile, CollationFactory}
+import org.apache.spark.sql.catalyst.util.{stringToFile, CollationFactory}
 import org.apache.spark.util.Utils
 
 // scalastyle:off line.size.limit
@@ -61,7 +63,7 @@ class ICUCollationsMapSuite extends SparkFunSuite {
   }
 
   test("ICU locales map breaking change") {
-    val goldenLines = fileToString(collationsMapFile).split('\n')
+    val goldenLines = Files.readString(collationsMapFile.toPath).split('\n')
     val goldenRelevantLines = goldenLines.slice(4, goldenLines.length) // skip header
     val input = goldenRelevantLines.map(
       s => (s.split('|')(2).strip(), s.split('|')(1).strip().toInt))

@@ -18,6 +18,7 @@
 package org.apache.spark.sql
 
 import java.io.File
+import java.nio.file.Files
 
 import scala.collection.mutable.ArrayBuffer
 import scala.util.control.NonFatal
@@ -31,7 +32,6 @@ import org.apache.spark.sql.catalyst.SQLConfHelper
 import org.apache.spark.sql.catalyst.expressions.{CurrentDate, CurrentTime, CurrentTimestampLike, CurrentUser, Literal}
 import org.apache.spark.sql.catalyst.planning.PhysicalOperation
 import org.apache.spark.sql.catalyst.plans.logical._
-import org.apache.spark.sql.catalyst.util.fileToString
 import org.apache.spark.sql.execution.HiveResult.hiveResultString
 import org.apache.spark.sql.execution.SQLExecution
 import org.apache.spark.sql.execution.command.{DescribeColumnCommand, DescribeCommandBase}
@@ -410,7 +410,7 @@ trait SQLQueryTestHelper extends SQLConfHelper with Logging {
     val importedTestCaseName = comments.filter(_.startsWith("--IMPORT ")).map(_.substring(9))
     val importedCode = importedTestCaseName.flatMap { testCaseName =>
       allTestCases.find(_.name == testCaseName).map { testCase =>
-        val input = fileToString(new File(testCase.inputFile))
+        val input = Files.readString(new File(testCase.inputFile).toPath)
         val (_, code) = splitCommentsAndCodes(input)
         code
       }
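Aside: the "--IMPORT" handling in the hunk above recovers the referenced test-case name by stripping a fixed nine-character prefix ("--IMPORT " including the trailing space). A quick self-contained check of that arithmetic; the input strings are illustrative:

object ImportDirectiveSketch {
  def main(args: Array[String]): Unit = {
    val comments = Seq("--IMPORT group-by.sql", "-- an ordinary comment")
    // "--IMPORT " is 9 characters, so substring(9) yields the imported name.
    val imported = comments.filter(_.startsWith("--IMPORT ")).map(_.substring(9))
    assert(imported == Seq("group-by.sql"))
  }
}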

@@ -19,6 +19,7 @@ package org.apache.spark.sql
 
 import java.io.File
 import java.net.URI
+import java.nio.file.Files
 import java.util.Locale
 
 import org.apache.spark.{SparkConf, TestUtils}
@@ -27,8 +28,8 @@ import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.catalyst.plans.SQLHelper
 import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.util.{fileToString, stringToFile}
 import org.apache.spark.sql.catalyst.util.DateTimeConstants.NANOS_PER_SECOND
+import org.apache.spark.sql.catalyst.util.stringToFile
 import org.apache.spark.sql.execution.WholeStageCodegenExec
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.internal.SQLConf.TimestampTypes
@@ -229,7 +230,7 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession with SQLHelper
 
   /** Run a test case. */
   protected def runSqlTestCase(testCase: TestCase, listTestCases: Seq[TestCase]): Unit = {
-    val input = fileToString(new File(testCase.inputFile))
+    val input = Files.readString(new File(testCase.inputFile).toPath)
     val (comments, code) = splitCommentsAndCodes(input)
     val queries = getQueries(code, comments, listTestCases)
     val settings = getSparkSettings(comments)
@@ -639,7 +640,7 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession with SQLHelper
       makeOutput: (String, Option[String], String) => QueryTestOutput): Unit = {
     // Read back the golden file.
     val expectedOutputs: Seq[QueryTestOutput] = {
-      val goldenOutput = fileToString(new File(resultFile))
+      val goldenOutput = Files.readString(new File(resultFile).toPath)
       val segments = goldenOutput.split("-- !query.*\n")
 
       val numSegments = outputs.map(_.numSegments).sum + 1

@@ -23,7 +23,7 @@ import java.nio.file.{Files, Paths}
 import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.sql.catalyst.util.{fileToString, resourceToString, stringToFile}
+import org.apache.spark.sql.catalyst.util.{resourceToString, stringToFile}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.TestSparkSession
 import org.apache.spark.tags.ExtendedSQLTest
@@ -130,7 +130,7 @@ class TPCDSQueryTestSuite extends QueryTest with TPCDSBase with SQLQueryTestHelper
 
     // Read back the golden file.
     val (expectedSchema, expectedOutput) = {
-      val goldenOutput = fileToString(goldenFile)
+      val goldenOutput = Files.readString(goldenFile.toPath)
       val segments = goldenOutput.split("-- !query.*\n")
 
       // query has 3 segments, plus the header

@@ -18,6 +18,7 @@
 package org.apache.spark.sql.hive.thriftserver
 
 import java.io.File
+import java.nio.file.Files
 import java.sql.{SQLException, Statement, Timestamp}
 import java.util.{Locale, MissingFormatArgumentException}
 
@@ -27,7 +28,6 @@ import org.apache.spark.SparkException
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.SQLQueryTestSuite
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
-import org.apache.spark.sql.catalyst.util.fileToString
 import org.apache.spark.sql.execution.HiveResult.{getBinaryFormatter, getTimeFormatters, toHiveString, BinaryFormatter, TimeFormatters}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.internal.SQLConf.TimestampTypes
@@ -151,7 +151,7 @@ class ThriftServerQueryTestSuite extends SQLQueryTestSuite with SharedThriftServer
 
     // Read back the golden file.
     val expectedOutputs: Seq[QueryTestOutput] = {
-      val goldenOutput = fileToString(new File(testCase.resultFile))
+      val goldenOutput = Files.readString(new File(testCase.resultFile).toPath)
       val segments = goldenOutput.split("-- !query.*\n")
 
       // each query has 3 segments, plus the header

@@ -19,6 +19,7 @@ package org.apache.spark.sql.hive.execution
 
 import java.io._
 import java.nio.charset.StandardCharsets
+import java.nio.file.Files
 import java.util
 import java.util.Locale
 
@@ -322,7 +323,7 @@ abstract class HiveComparisonTest extends SparkFunSuite with BeforeAndAfterAll {
     val hiveCachedResults = hiveCacheFiles.flatMap { cachedAnswerFile =>
       logDebug(s"Looking for cached answer file $cachedAnswerFile.")
       if (cachedAnswerFile.exists) {
-        Some(fileToString(cachedAnswerFile))
+        Some(Files.readString(cachedAnswerFile.toPath))
       } else {
         logDebug(s"File $cachedAnswerFile not found")
         None

@@ -18,8 +18,7 @@
 package org.apache.spark.sql.hive.execution
 
 import java.io.File
-
-import org.apache.spark.sql.catalyst.util._
+import java.nio.file.Files
 
 /**
  * A framework for running the query tests that are listed as a set of text files.
@@ -67,7 +66,7 @@ abstract class HiveQueryFileTest extends HiveComparisonTest {
         realIncludeList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_) ||
         runAll) {
       // Build a test case and submit it to scala test framework...
-      val queriesString = fileToString(testCaseFile)
+      val queriesString = Files.readString(testCaseFile.toPath)
      createQueryTest(testCaseName, queriesString, reset = true, tryWithoutResettingFirst = true)
     } else {
       // Only output warnings for the built in includeList as this clutters the output when the
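Aside: the removed fileToString also accepted an optional Charset, while every call site updated in this PR reads UTF-8. If a non-default charset were ever needed again, Files.readString has a two-argument overload with the same shape; a sketch, not exercised anywhere in this PR:

import java.nio.charset.StandardCharsets.ISO_8859_1
import java.nio.file.{Files, Path}

object CharsetOverloadSketch {
  def main(args: Array[String]): Unit = {
    val p: Path = Files.createTempFile("latin1", ".txt")
    Files.write(p, "café".getBytes(ISO_8859_1))

    // Files.readString(Path) assumes UTF-8; the (Path, Charset) overload
    // mirrors the removed fileToString(file, encoding) signature.
    val text = Files.readString(p, ISO_8859_1)
    assert(text == "café")
    Files.delete(p)
  }
}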