From c52cfbf4145c9187e418bc81b3cec0706f4330d2 Mon Sep 17 00:00:00 2001
From: David Han
Date: Tue, 21 Jan 2025 16:59:52 -0500
Subject: [PATCH] [fix] Use indirect writeMethod for BigQuery writes to actually
 output partitioned tables.

---
 .../ai/chronon/integrations/cloud_gcp/GcpFormatProvider.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cloud_gcp/src/main/scala/ai/chronon/integrations/cloud_gcp/GcpFormatProvider.scala b/cloud_gcp/src/main/scala/ai/chronon/integrations/cloud_gcp/GcpFormatProvider.scala
index 3abadff4b7..3b44fa0fe7 100644
--- a/cloud_gcp/src/main/scala/ai/chronon/integrations/cloud_gcp/GcpFormatProvider.scala
+++ b/cloud_gcp/src/main/scala/ai/chronon/integrations/cloud_gcp/GcpFormatProvider.scala
@@ -50,7 +50,7 @@ case class GcpFormatProvider(sparkSession: SparkSession) extends FormatProvider
     val sparkOptions: Map[String, String] = Map(
       // todo(tchow): No longer needed after https://github.com/GoogleCloudDataproc/spark-bigquery-connector/pull/1320
       "temporaryGcsBucket" -> sparkSession.conf.get("spark.chronon.table.gcs.temporary_gcs_bucket"),
-      "writeMethod" -> "direct",
+      "writeMethod" -> "indirect", // writeMethod direct does not output partitioned tables. keep as indirect.
       "materializationProject" -> tableId.getProject,
       "materializationDataset" -> tableId.getDataset
     ) ++ partitionColumnOption
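
For reference, below is a minimal sketch of how options like these are consumed by a
spark-bigquery-connector write when the indirect method is used. It is not part of the
patch; the destination table, temporary bucket, and partition column names are
hypothetical placeholders, and the partitionField/partitionType options are shown only
to illustrate the indirect (GCS staging plus load job) path that the patch switches to.

    import org.apache.spark.sql.DataFrame

    object IndirectWriteSketch {
      // Sketch only: writes a DataFrame to BigQuery via the indirect method.
      // Data is staged in a temporary GCS bucket and then loaded into BigQuery
      // with a load job, which carries the partitioning options through; the
      // patch comment notes that the direct writeMethod did not produce
      // partitioned tables.
      def writePartitioned(df: DataFrame): Unit = {
        df.write
          .format("bigquery")
          .option("writeMethod", "indirect")
          .option("temporaryGcsBucket", "my-temp-bucket") // hypothetical bucket
          .option("partitionField", "ds")                 // hypothetical partition column
          .option("partitionType", "DAY")
          .mode("overwrite")
          .save("my-project.my_dataset.my_table")         // hypothetical destination
      }
    }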