diff --git a/release/nightly_tests/dataset/read_tfrecords_benchmark.py b/release/nightly_tests/dataset/read_tfrecords_benchmark.py
index 630e3d9306a0..48d1bb229195 100644
--- a/release/nightly_tests/dataset/read_tfrecords_benchmark.py
+++ b/release/nightly_tests/dataset/read_tfrecords_benchmark.py
@@ -67,11 +67,10 @@ def generate_features(batch):
         features = {k: v for (k, v) in features.items() if len(v) > 0}
         return pa.table(features)
 
-    ds = ray.data.range(num_rows).map_batches(generate_features)
-    assert ds.count() == num_rows, ds.count()
-    tfrecords_dir = tempfile.mkdtemp()
-    ds.write_tfrecords(tfrecords_dir)
+    ray.data.range(num_rows).map_batches(generate_features).write_tfrecords(
+        tfrecords_dir
+    )
 
     return tfrecords_dir
 
 
diff --git a/release/release_tests.yaml b/release/release_tests.yaml
index e2679b69a4b0..6c22f91bf100 100644
--- a/release/release_tests.yaml
+++ b/release/release_tests.yaml
@@ -4342,8 +4342,8 @@
     cluster_compute: single_node_benchmark_compute.yaml
 
   run:
-    # Expect the benchmark to finish around 22 minutes.
-    timeout: 1800
+    # Expect the benchmark to finish around 30 minutes.
+    timeout: 2700
     script: python read_tfrecords_benchmark.py
 
   variations: