[SPARK-21596][SS] Ensure places calling HDFSMetadataLog.get check the return value #18799
**HDFSMetadataLog.scala**
```diff
@@ -123,7 +123,7 @@ class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path:
         serialize(metadata, output)
         return Some(tempPath)
       } finally {
-        IOUtils.closeQuietly(output)
+        output.close()
       }
     } catch {
       case e: FileAlreadyExistsException =>
```
```diff
@@ -211,13 +211,17 @@ class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path:
   }

   override def get(startId: Option[Long], endId: Option[Long]): Array[(Long, T)] = {
     assert(startId.isEmpty || endId.isEmpty || startId.get <= endId.get)
     val files = fileManager.list(metadataPath, batchFilesFilter)
     val batchIds = files
       .map(f => pathToBatchId(f.getPath))
       .filter { batchId =>
         (endId.isEmpty || batchId <= endId.get) && (startId.isEmpty || batchId >= startId.get)
-      }
-    batchIds.sorted.map(batchId => (batchId, get(batchId))).filter(_._2.isDefined).map {
+      }.sorted
+
+    verifyBatchIds(batchIds, startId, endId)
+
+    batchIds.map(batchId => (batchId, get(batchId))).filter(_._2.isDefined).map {
       case (batchId, metadataOption) =>
         (batchId, metadataOption.get)
     }
```
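The practical effect: previously a batch file missing from the middle of the requested range was silently dropped by `filter(_._2.isDefined)`; now `verifyBatchIds` (added below) fails fast when the range has holes. A minimal sketch of the check's behavior, not part of the PR, assuming the Spark SQL internals are on the classpath:

```scala
import org.apache.spark.sql.execution.streaming.HDFSMetadataLog.verifyBatchIds

// Contiguous ids covering the requested range: passes silently.
verifyBatchIds(Seq(0L, 1L, 2L), Some(0L), Some(2L))

// Batch 1 is missing: get() used to skip it silently; now the gap is
// reported before any metadata is deserialized.
try {
  verifyBatchIds(Seq(0L, 2L), Some(0L), Some(2L))
} catch {
  case e: IllegalStateException =>
    // e.g. "batches (1) don't exist (startId: Some(0), endId: Some(2))"
    println(e.getMessage)
}
```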
```diff
@@ -437,4 +441,51 @@ object HDFSMetadataLog {
       }
     }
   }
+
+  /**
+   * Verify if batchIds are continuous and between `startId` and `endId`.
+   *
+   * @param batchIds the sorted ids to verify.
+   * @param startId the start id. If it's set, batchIds should start with this id.
+   * @param endId the end id. If it's set, batchIds should end with this id.
+   */
+  def verifyBatchIds(batchIds: Seq[Long], startId: Option[Long], endId: Option[Long]): Unit = {
+    // Verify that we can get all batches between `startId` and `endId`.
+    if (startId.isDefined || endId.isDefined) {
+      if (batchIds.isEmpty) {
+        throw new IllegalStateException(s"batch ${startId.orElse(endId).get} doesn't exist")
```
**Contributor:** It would be good to print the range that was asked for; otherwise it's hard to see what was expected while debugging.
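A hypothetical variant following that suggestion (not part of this diff) would include the requested range in the empty-list branch too, mirroring the messages the other branches already print:

```scala
// Sketch: same check as above, with the requested range in the message.
if (batchIds.isEmpty) {
  throw new IllegalStateException(
    s"batch ${startId.orElse(endId).get} doesn't exist " +
    s"(startId: $startId, endId: $endId)")
}
```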
```diff
+      }
+      if (startId.isDefined) {
+        val minBatchId = batchIds.head
+        assert(minBatchId >= startId.get)
+        if (minBatchId != startId.get) {
+          val missingBatchIds = startId.get to minBatchId
+          throw new IllegalStateException(
+            s"batches (${missingBatchIds.mkString(", ")}) don't exist " +
+            s"(startId: $startId, endId: $endId)")
+        }
+      }
+
+      if (endId.isDefined) {
+        val maxBatchId = batchIds.last
+        assert(maxBatchId <= endId.get)
+        if (maxBatchId != endId.get) {
+          val missingBatchIds = maxBatchId to endId.get
+          throw new IllegalStateException(
+            s"batches (${missingBatchIds.mkString(", ")}) don't exist " +
+            s"(startId: $startId, endId: $endId)")
+        }
+      }
+    }
+
+    if (batchIds.nonEmpty) {
+      val minBatchId = batchIds.head
+      val maxBatchId = batchIds.last
+      val missingBatchIds = (minBatchId to maxBatchId).toSet -- batchIds
+      if (missingBatchIds.nonEmpty) {
+        throw new IllegalStateException(s"batches (${missingBatchIds.mkString(", ")}) " +
+          s"don't exist (startId: $startId, endId: $endId)")
+      }
+    }
+  }
 }
```
**HDFSMetadataLogSuite.scala**
```diff
@@ -259,6 +259,23 @@ class HDFSMetadataLogSuite extends SparkFunSuite with SharedSQLContext {
       fm.rename(path2, path3)
     }
   }
+
+  test("verifyBatchIds") {
+    import HDFSMetadataLog.verifyBatchIds
+    verifyBatchIds(Seq(1L, 2L, 3L), Some(1L), Some(3L))
+    verifyBatchIds(Seq(1L), Some(1L), Some(1L))
```
**Contributor:** You didn't test the valid cases when one of the start or end is None.
```diff
+    verifyBatchIds(Seq(1L, 2L, 3L), None, Some(3L))
+    verifyBatchIds(Seq(1L, 2L, 3L), Some(1L), None)
+    verifyBatchIds(Seq(1L, 2L, 3L), None, None)
+
+    intercept[IllegalStateException](verifyBatchIds(Seq(), Some(1L), None))
+    intercept[IllegalStateException](verifyBatchIds(Seq(), None, Some(1L)))
+    intercept[IllegalStateException](verifyBatchIds(Seq(), Some(1L), Some(1L)))
+    intercept[IllegalStateException](verifyBatchIds(Seq(2, 3, 4), Some(1L), None))
+    intercept[IllegalStateException](verifyBatchIds(Seq(2, 3, 4), None, Some(5L)))
+    intercept[IllegalStateException](verifyBatchIds(Seq(2, 3, 4), Some(1L), Some(5L)))
+    intercept[IllegalStateException](verifyBatchIds(Seq(1, 2, 4, 5), Some(1L), Some(5L)))
+  }
 }

 /** FakeFileSystem to test fallback of the HDFSMetadataLog from FileContext to FileSystem API */
```
**FileStreamSourceSuite.scala**
```diff
@@ -1314,6 +1314,7 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
       val metadataLog =
         new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, dir.getAbsolutePath)
       assert(metadataLog.add(0, Array(FileEntry(s"$scheme:///file1", 100L, 0))))
+      assert(metadataLog.add(1, Array(FileEntry(s"$scheme:///file2", 200L, 0))))
```
**Contributor:** What was this change for?
```diff

       val newSource = new FileStreamSource(spark, s"$scheme:///", "parquet", StructType(Nil), Nil,
         dir.getAbsolutePath, Map.empty)
```
**Review comment** (on the `IOUtils.closeQuietly(output)` → `output.close()` change above): The output stream may fail to close (e.g., fail to flush the internal buffer). If that happens, we should fail the query rather than ignore it.
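To make the distinction concrete, here is a minimal, self-contained sketch (not from the PR) of the failure mode being fixed; the anonymous stream is a hypothetical stand-in for an HDFS output stream whose buffered flush fails on close:

```scala
import java.io.{IOException, OutputStream}
import org.apache.commons.io.IOUtils

// Hypothetical stream whose close() fails, standing in for an HDFS
// stream that cannot flush its buffer to the metadata log file.
val failing: OutputStream = new OutputStream {
  override def write(b: Int): Unit = ()
  override def close(): Unit = throw new IOException("cannot flush buffer")
}

// Old behavior: the IOException is swallowed, so the batch file may be
// incomplete while the query keeps running as if the write succeeded.
IOUtils.closeQuietly(failing)

// New behavior: close() propagates the IOException, failing the query
// instead of silently losing metadata.
failing.close()
```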