[HUDI-4038] Avoid calling getDataSize after every record written
#5497
Changes from all commits: 53d657e, e9ab768, 2ae4ae4, 18b0f4f, 56fc702, 750c4ea
New file: `HoodieBaseParquetWriter.java` (+87 lines)

```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hudi.io.storage;

import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.fs.HoodieWrapperFileSystem;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.api.WriteSupport;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Base class of Hudi's custom {@link ParquetWriter} implementations
 *
 * @param <R> target type of the object being written into Parquet files (for ex,
 *            {@code IndexedRecord}, {@code InternalRow})
 */
public abstract class HoodieBaseParquetWriter<R> extends ParquetWriter<R> {

  private static final int WRITTEN_RECORDS_THRESHOLD_FOR_FILE_SIZE_CHECK = 1000;

  private final AtomicLong writtenRecordCount = new AtomicLong(0);
  private final long maxFileSize;
  private long lastCachedDataSize = -1;

  public HoodieBaseParquetWriter(Path file,
                                 HoodieBaseParquetConfig<? extends WriteSupport<R>> parquetConfig) throws IOException {
    super(HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf()),
        ParquetFileWriter.Mode.CREATE,
        parquetConfig.getWriteSupport(),
        parquetConfig.getCompressionCodecName(),
        parquetConfig.getBlockSize(),
        parquetConfig.getPageSize(),
        parquetConfig.getPageSize(),
        parquetConfig.dictionaryEnabled(),
        DEFAULT_IS_VALIDATING_ENABLED,
        DEFAULT_WRITER_VERSION,
        FSUtils.registerFileSystem(file, parquetConfig.getHadoopConf()));

    // We cannot accurately measure the snappy compressed output file size. We are choosing a
    // conservative 10%
    // TODO - compute this compression ratio dynamically by looking at the bytes written to the
    // stream and the actual file size reported by HDFS
    this.maxFileSize = parquetConfig.getMaxFileSize()
        + Math.round(parquetConfig.getMaxFileSize() * parquetConfig.getCompressionRatio());
  }

  public boolean canWrite() {
    // TODO we can actually do evaluation more accurately:
```
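For a quick sanity check on the cutoff computed in the constructor: with an illustrative maxFileSize of 120 MB (the figure that also comes up in the review discussion below) and a compressionRatio of 0.10, the writer keeps accepting records until the reported size reaches about 132 MB. The numbers here are assumptions for illustration, not values taken from this PR's config:

```java
// Illustrative numbers only (not values taken from this PR's config):
long maxFileSizeBytes = 120L * 1024 * 1024;   // 125_829_120 bytes
double compressionRatio = 0.10;               // the "conservative 10%" headroom
long cutoff = maxFileSizeBytes + Math.round(maxFileSizeBytes * compressionRatio);
// cutoff == 138_412_032 bytes, i.e. ~132 MB
```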
Member
+1 to the overall idea, but here is the deal: the size may not update until a row group is actually flushed out to storage. What pattern did you observe on writes to S3? I see the code here, which should respect the buffered data?

Contributor (Author)
It won't: it always returns an accurate metric, because it combines the size of the row groups already flushed out with the size of the data still buffered in memory. With the second being the problem -- it always traverses all of the cached column groups to accurately calculate the in-memory footprint (and there's no internal caching). So what ended up happening is that it kept growing the buffer for the whole file (120 MB), not flushing it until closure, which made the traversals quadratic in runtime.
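To make that cost concrete, here is a rough sketch of the access pattern the author describes; the types and names are made up for illustration and this is not Parquet's actual code:

```java
import java.util.List;

// Hypothetical types, for illustration only (not Parquet's API).
interface ColumnBuffer {
  long sizeInBytes();
}

class DataSizeCostSketch {
  // Reported size = bytes of row groups already flushed + bytes still buffered in memory.
  // The buffered part is recomputed by walking every buffered column on each call,
  // with no caching of the previous result.
  static long reportedDataSize(long flushedBytes, List<ColumnBuffer> bufferedColumns) {
    long buffered = 0;
    for (ColumnBuffer column : bufferedColumns) {
      buffered += column.sizeInBytes();
    }
    return flushedBytes + buffered;
  }
}
// If something like this runs once per record while nothing is flushed until the file is
// closed, the per-call cost keeps growing with the buffered data, so the total bookkeeping
// work over N records grows roughly quadratically -- which is what this PR avoids by
// re-checking the size only every 1000 records.
```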
```java
    // if we cache last data size check, since we account for how many records
```
Member
Can we file a JIRA for this follow-on work, after verifying the real-time-ness of the reported data size?

Contributor (Author)
I validated that it does reflect the buffered data -- see the explanation above.
```java
    // were written we can accurately project avg record size, and therefore
    // estimate how many more records we can write before cut off
    if (lastCachedDataSize == -1 || getWrittenRecordCount() % WRITTEN_RECORDS_THRESHOLD_FOR_FILE_SIZE_CHECK == 0) {
      lastCachedDataSize = getDataSize();
    }
    return lastCachedDataSize < maxFileSize;
  }

  @Override
  public void write(R object) throws IOException {
    super.write(object);
    writtenRecordCount.incrementAndGet();
  }

  protected long getWrittenRecordCount() {
    return writtenRecordCount.get();
  }
}
```
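A minimal sketch of how a caller might drive this writer; the loop and helper class are hypothetical, and actual Hudi write handles wire this up differently:

```java
import java.io.IOException;
import java.util.Iterator;

class WriterLoopSketch {
  // R stands in for whatever concrete record type the subclass writes.
  static <R> void writeAll(HoodieBaseParquetWriter<R> writer, Iterator<R> records) throws IOException {
    // canWrite() refreshes the cached size estimate only every 1000 records,
    // so getDataSize() is no longer invoked on every single write.
    while (records.hasNext() && writer.canWrite()) {
      writer.write(records.next());
    }
    writer.close(); // in practice, remaining records roll over into a new file
  }
}
```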
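The TODO in canWrite() and the second review thread point at a possible follow-on: since the writer counts written records, it could project an average record size and estimate how many more records fit before the cutoff, instead of re-checking at a fixed interval. A rough sketch of that idea, with made-up names and not part of this PR:

```java
class RemainingRecordsSketch {
  // Hypothetical follow-on: estimate the remaining record budget from the last observed
  // data size and the number of records written so far.
  static long estimateRemainingRecords(long lastCachedDataSize, long writtenRecords, long maxFileSize) {
    if (writtenRecords == 0 || lastCachedDataSize <= 0) {
      return Long.MAX_VALUE; // no data yet, nothing to extrapolate from
    }
    long avgRecordSize = Math.max(1, lastCachedDataSize / writtenRecords);
    return Math.max(0, (maxFileSize - lastCachedDataSize) / avgRecordSize);
  }
}
```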