-
Notifications
You must be signed in to change notification settings - Fork 8
Changes to fix all the test failures with Bazel #310
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 36 commits
e06fe47
6fb913b
ef85a25
619ba85
e541be0
2d7f800
af6c6ad
f6f4dbd
be5d4a0
ece3475
112c4d8
d9daa7c
56eb85b
cef6479
d52edae
a68ce87
354e187
fe2b6b0
da04f6d
3b36159
52cb1d7
712590f
0c9347a
27176c1
252e429
fd7eb94
f2885f1
7746ece
37ac482
d47e270
93b3ced
8ad4c73
69dfce0
22c11b0
0dc715d
22408c7
58882eb
bf85d1d
389dc1a
d8c8181
21d9a5a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -56,8 +56,9 @@ jobs: | |||||||||||||
|
|
||||||||||||||
| - name: Run spark join tests | ||||||||||||||
| run: | | ||||||||||||||
| export SBT_OPTS="-Xmx8G -Xms2G --add-opens=java.base/sun.nio.ch=ALL-UNNAMED" | ||||||||||||||
| sbt "spark/testOnly -- -n jointest" | ||||||||||||||
| # export SBT_OPTS="-Xmx8G -Xms2G --add-opens=java.base/sun.nio.ch=ALL-UNNAMED" | ||||||||||||||
| # sbt "spark/testOnly -- -n jointest" | ||||||||||||||
| bazel test //spark:test_test_suite_src_test_scala_ai_chronon_spark_test_JoinUtilsTest.scala | ||||||||||||||
|
||||||||||||||
| # export SBT_OPTS="-Xmx8G -Xms2G --add-opens=java.base/sun.nio.ch=ALL-UNNAMED" | |
| # sbt "spark/testOnly -- -n jointest" | |
| bazel test //spark:test_test_suite_src_test_scala_ai_chronon_spark_test_JoinUtilsTest.scala | |
| # export SBT_OPTS="-Xmx8G -Xms2G --add-opens=java.base/sun.nio.ch=ALL-UNNAMED" | |
| # sbt "spark/testOnly -- -n jointest" | |
| bazel test //spark/test/scala/ai/chronon/spark/test:JoinUtilsTest |
🧰 Tools
🪛 YAMLlint (1.35.1)
[error] 61-61: syntax error: expected &lt;block end&gt;, but found '&lt;block mapping start&gt;'
(syntax)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,22 @@ | ||
| directories: | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. (discussed on call) do we pull this up one level to avoid being overwritten by IntelliJ? |
||
| # Add the directories you want added as source here | ||
| # By default, we've added your entire workspace ('.') | ||
| . | ||
|
|
||
| # Automatically includes all relevant targets under the 'directories' above | ||
| derive_targets_from_directories: true | ||
|
|
||
| targets: | ||
| # If source code isn't resolving, add additional targets that compile it here | ||
|
|
||
| additional_languages: | ||
| # Uncomment any additional languages you want supported | ||
| # android | ||
| # dart | ||
| # go | ||
| # javascript | ||
| # kotlin | ||
| python | ||
| scala | ||
| typescript | ||
| java | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| package(default_visibility = ["//visibility:public"]) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,136 @@ | ||
| namespace java ai.chronon.api | ||
|
|
||
|
|
||
| struct YarnAutoScalingSpec { | ||
| 1: optional i32 minInstances | ||
| 2: optional i32 maxInstances | ||
| 3: optional i32 scaleUpFactor // 1.5x, 2x etc | ||
| 4: optional i32 scaleDownFactor | ||
| 5: optional string cooldownPeriod | ||
| } | ||
|
|
||
| // our clusters are created transiently prior to running the job | ||
| struct YarnClusterSpec { | ||
| 1: optional string clusterName | ||
| 2: optional string hostType | ||
| 3: optional i32 hostCount | ||
|
|
||
| // dataproc = x.y.z, emr = x.y.z, etc | ||
| 10: optional string yarnOfferingVersion | ||
|
|
||
| // to access the right data and right back to kvstore | ||
| 20: optional string networkPolicy | ||
| 30: optional YarnAutoScalingSpec autoScalingSpec | ||
| } | ||
|
|
||
| enum YarnJobType { | ||
| SPARK = 0, | ||
| FLINK = 1, | ||
| } | ||
|
|
||
| struct YarnJob { | ||
| // create transient cluster with this name and runs an app with the same yarn name | ||
| 1: optional string appName | ||
| 2: optional YarnJobType jobType | ||
|
|
||
| 10: optional list<string> args | ||
| 11: optional map<string, string> env | ||
| 12: optional map<string, string> conf | ||
| // creates local file with this name and contents - relative to cwd | ||
| // contains the groupBy, join, queries etc | ||
| 13: optional map<string, string> fileWithContents | ||
|
|
||
| 20: optional string chrononVersion | ||
| 21: optional YarnClusterSpec clusterSpec | ||
| } | ||
|
|
||
| struct KvWrite { | ||
| 1: optional string key | ||
| 2: optional string value | ||
| 3: optional string timestamp | ||
| } | ||
|
|
||
| // currently used for writing join metadata to kvstore needed prior to fetching joins | ||
| struct KvWriteJob { | ||
| 1: optional string scope // projectId in gcp, account name in aws | ||
| 2: optional string dataset | ||
| 3: optional string table | ||
| 4: optional list<KvWrite> writes | ||
| } | ||
|
|
||
| struct PartitionListingJob { | ||
| 1: optional string scope // projectId in gcp, account name in aws | ||
| 2: optional string dataset | ||
| 3: optional string table | ||
| 4: optional string partitionColumn | ||
| 5: optional list<string> extraPartitionFilters | ||
| } | ||
|
|
||
| // agent accepts jobs and runs them | ||
| union JobBase { | ||
| 1: YarnJob yarnJob | ||
| 2: KvWriteJob kvWriteJob | ||
| 3: PartitionListingJob partitionListingJob | ||
| } | ||
|
|
||
| struct Job { | ||
| 1: optional string jobId | ||
| 2: optional JobBase jobUnion | ||
| 3: optional i32 statusReportInterval | ||
| 4: optional i32 maxRetries | ||
| } | ||
|
|
||
| struct JobListGetRequest { | ||
| // this is only sent on the first request after a start | ||
| 1: optional list<string> existingJobsIds | ||
| } | ||
|
|
||
| struct JobListResponse { | ||
| // controller responds with jobs data plane agent is not aware of | ||
| 1: optional list<Job> jobsToStart | ||
| 2: optional list<string> jobsToStop | ||
| } | ||
|
|
||
| enum JobStatusType { | ||
| PENDING = 0, | ||
| RUNNING = 1, | ||
| SUCCEEDED = 2, | ||
| FAILED = 3, | ||
| STOPPED = 4 | ||
| } | ||
|
|
||
| struct ResourceUsage { | ||
| 1: optional i64 vcoreSeconds | ||
| 2: optional i64 megaByteSeconds | ||
| 3: optional i64 cumulativeDiskWriteBytes | ||
| 4: optional i64 cumulativeDiskReadBytes | ||
| } | ||
|
|
||
| struct YarnIncrementalJobStatus { | ||
| // batch / streaming job | ||
| 1: optional map<JobStatusType, i64> statusChangeTimes | ||
| 2: optional ResourceUsage resourceUsage | ||
| // driver logs - probably only errors and exceptions | ||
| 3: optional list<string> logsSinceLastPush | ||
| } | ||
|
|
||
| struct JobInfo { | ||
| 1: optional string jobId | ||
| 2: optional JobStatusType currentStatus | ||
|
|
||
| 10: optional YarnIncrementalJobStatus yarnIncrementalStatus | ||
| } | ||
|
|
||
| struct DatePartitionRange { | ||
| 1: optional string start | ||
| 2: optional string end | ||
| } | ||
|
|
||
| struct PartitionListingPutRequest { | ||
| 1: optional map<PartitionListingJob, list<DatePartitionRange>> partitions | ||
| 2: optional map<PartitionListingJob, string> errors | ||
| } | ||
|
|
||
| struct JobInfoPutRequest { | ||
| 1: optional list<JobInfo> jobStatuses | ||
| } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fix YAML syntax error.
Remove the empty line between the commented lines and the bazel command to maintain valid YAML syntax.
📝 Committable suggestion
🧰 Tools
🪛 YAMLlint (1.35.1)
[error] 53-53: syntax error: expected &lt;block end&gt;, but found '&lt;block mapping start&gt;'
(syntax)