diff --git a/.github/styles/config/vocabularies/TraceMachina/accept.txt b/.github/styles/config/vocabularies/TraceMachina/accept.txt index 14083e8cc..3f3e49d2c 100644 --- a/.github/styles/config/vocabularies/TraceMachina/accept.txt +++ b/.github/styles/config/vocabularies/TraceMachina/accept.txt @@ -5,10 +5,12 @@ Cloudflare ELB GPUs Goma +Kustomization +[Hh]ermeticity LLD LLVM Machina -Mintlify +[Mm]onorepo NVMe NativeLink OCI @@ -16,11 +18,11 @@ OSSF Reclient SPDX Starlark -TIP Tokio TraceMachina [Tt]oolchain Qwik +Verilog alex autoscaling blazingly diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml index 579c0afa4..54fb4a466 100644 --- a/.github/workflows/codeql.yaml +++ b/.github/workflows/codeql.yaml @@ -12,6 +12,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: analyze: name: Analyze diff --git a/.github/workflows/image.yaml b/.github/workflows/image.yaml index a7be46849..7e96e730f 100644 --- a/.github/workflows/image.yaml +++ b/.github/workflows/image.yaml @@ -10,6 +10,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: publish-image: strategy: diff --git a/.github/workflows/lre.yaml b/.github/workflows/lre.yaml index bdcab0adf..ad3d5cf2e 100644 --- a/.github/workflows/lre.yaml +++ b/.github/workflows/lre.yaml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: local: strategy: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index de0a83992..6069cd5ee 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: nativelink-dot-com-build-on-main-test: runs-on: ubuntu-22.04 @@ -30,6 +34,10 @@ jobs: bazel test \ --remote_cache=${{ secrets.NATIVELINK_COM_REMOTE_CACHE_URL }} \ --remote_header=${{ secrets.NATIVELINK_COM_API_HEADER }} \ + --bes_backend=${{ secrets.NATIVELINK_COM_BES_URL }} \ + --bes_header=${{ secrets.NATIVELINK_COM_API_HEADER }} \ + --bes_results_url=${{ secrets.NATIVELINK_COM_BES_RESULTS_URL }} \ + --remote_header=x-nativelink-project=nativelink-ci \ //...
docker-compose-compiles-nativelink: diff --git a/.github/workflows/native-bazel.yaml b/.github/workflows/native-bazel.yaml index 89583df7f..c3f4e969d 100644 --- a/.github/workflows/native-bazel.yaml +++ b/.github/workflows/native-bazel.yaml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: unit-tests: strategy: diff --git a/.github/workflows/native-cargo.yaml b/.github/workflows/native-cargo.yaml index bf30e5a97..93bb48775 100644 --- a/.github/workflows/native-cargo.yaml +++ b/.github/workflows/native-cargo.yaml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: cargo-native: strategy: diff --git a/.github/workflows/nix.yaml b/.github/workflows/nix.yaml index 38a5494e2..212befa5f 100644 --- a/.github/workflows/nix.yaml +++ b/.github/workflows/nix.yaml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: nix-bazel: strategy: diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml index 27c37f66d..22ca28884 100644 --- a/.github/workflows/pre-commit.yaml +++ b/.github/workflows/pre-commit.yaml @@ -10,6 +10,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: pre-commit-checks: runs-on: ubuntu-22.04 diff --git a/.github/workflows/sanitizers.yaml b/.github/workflows/sanitizers.yaml index 3d86d433a..ad3ae280a 100644 --- a/.github/workflows/sanitizers.yaml +++ b/.github/workflows/sanitizers.yaml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: sanitized-tests: strategy: diff --git a/.github/workflows/scorecard.yaml b/.github/workflows/scorecard.yaml index 8ec71a933..0f1551e2d 100644 --- a/.github/workflows/scorecard.yaml +++ b/.github/workflows/scorecard.yaml @@ -9,6 +9,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: analysis: name: Scorecard analysis diff --git a/.github/workflows/tagged_image.yaml b/.github/workflows/tagged_image.yaml index 3615206c7..e1b1d8f17 100644 --- a/.github/workflows/tagged_image.yaml +++ b/.github/workflows/tagged_image.yaml @@ -7,6 +7,10 @@ on: permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: publish-image: strategy: diff --git a/.github/workflows/vale.yaml b/.github/workflows/vale.yaml index 4ea2fdedf..8e16fb6b6 100644 --- a/.github/workflows/vale.yaml +++ b/.github/workflows/vale.yaml @@ -5,6 +5,10 @@ on: [pull_request] permissions: read-all +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + jobs: vale: name: vale diff --git a/.vale.ini b/.vale.ini index 78a7e64e9..4ff9c184e 100644 --- a/.vale.ini +++ b/.vale.ini @@ -8,13 +8,15 @@ Vocab = TraceMachina Packages = alex, Microsoft, write-good -# TODO(aaronmondal): Fix mdx files and enable this. 
-# [formats] -# mdx = md +[formats] +mdx = md -[*.md] +[*.{md,mdx}] BasedOnStyles = alex, Vale, Microsoft, write-good +# Ignore code blocks in Starlight's TabItems. +BlockIgnores = (?s)(.*?```.*?```.*?) + # Too harsh. The `write-good.Passive` check already covers many cases. write-good.E-Prime = NO diff --git a/README.md b/README.md index adbdd868c..4a5e7c079 100644 --- a/README.md +++ b/README.md @@ -1,218 +1,97 @@ -# NativeLink - -[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +# +

+<!-- HTML picture block rendering the NativeLink logo -->

+ +[![Homepage](https://img.shields.io/badge/Homepage-8A2BE2)](https://nativelink.com) +[![GitHub stars](https://img.shields.io/github/stars/tracemachina/nativelink?style=social)](https://github.com/TraceMachina/nativelink) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/TraceMachina/nativelink/badge)](https://securityscorecards.dev/viewer/?uri=github.com/TraceMachina/nativelink) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8050/badge)](https://www.bestpractices.dev/projects/8050) [![Slack](https://img.shields.io/badge/slack--channel-blue?logo=slack)](https://nativelink.slack.com/join/shared_invite/zt-281qk1ho0-krT7HfTUIYfQMdwflRuq7A#/shared-invite/email) +[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) NativeLink is an extremely (blazingly?) fast and efficient build cache and remote executor for systems that communicate using the [Remote execution -protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) such as [Bazel](https://bazel.build), [Buck2](https://buck2.build), [Goma](https://chromium.googlesource.com/infra/goma/client/) and -[Reclient](https://github.com/bazelbuild/reclient). NativeLink powers over one billion requests per month for customers using the system for their production workloads. +protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) such as [Bazel](https://bazel.build), [Buck2](https://buck2.build), and +[Reclient](https://github.com/bazelbuild/reclient). NativeLink powers several billion requests per month in production workloads, as well as operating systems deployed to over one billion edge devices and hundreds of thousands of data center servers in HPC environments. It supports Unix-based operating systems and Windows. -## Getting Started with NativeLink - -Below, you will find a few different options for getting started with NativeLink. - -### 📝 Clone the NativeLink repository -1. Go to the [NativeLink](https://github.com/TraceMachina/nativelink) repository on GitHub. Clone the repository via SSH or HTTPS. In this example the repository is cloned via SSH: -```bash -git clone git@github.com:TraceMachina/nativelink.git -``` - -### 📦 Installing with Cargo - -1. First install Rust, but skip to step 2 if you have it already. -```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -``` -2. Install NativeLink with Cargo. -```bash -cargo install --git https://github.com/TraceMachina/nativelink --tag v0.4.0 -``` - -### ⚙️ Configure and 🦾 Start NativeLink - -The `nativelink` executable reads a JSON file as it's only parameter, -`--config`. See [`nativelink-config`](./nativelink-config/examples/basic_cas.json) -for more details and examples. +## 🚀 Quickstart -To grab the example in your current working directory, run: +The setups below are **production-grade** installations. See the +[contribution docs](https://docs.nativelink.com/contribute/nix/) for +instructions on how to build from source with [Bazel](https://docs.nativelink.com/contribute/bazel/), +[Cargo](https://docs.nativelink.com/contribute/cargo/), and [Nix](https://docs.nativelink.com/contribute/nix/).
-```bash -curl -O https://raw.githubusercontent.com/TraceMachina/nativelink/main/nativelink-config/examples/basic_cas.json +### 📦 Prebuilt images -### you can modify the example above to replace the filesystem store with the memory store if you favor speed over data durability. -nativelink basic_cas.json -``` +Fast to spin up, but currently limited to `x86_64` systems. See the [container +registry](https://github.com/TraceMachina/nativelink/pkgs/container/nativelink) +for all image tags and the [contribution docs](https://docs.nativelink.com/contribute/nix) +for how to build the images yourself. -## 🧪 Evaluating NativeLink +**Linux x86_64** -1. Once you've built NativeLink and have an instance running with the `basic_cas.json` configuration, launch a separate terminal session. -2. Navigate to where you cloned the NativeLink repository: ```bash -cd $HOME/nativelink -``` -3. In the new terminal, run the following command to connect the running server launched above to Bazel or another RBE client: - -```sh -bazel test //... \ - --remote_instance_name=main \ - --remote_cache=grpc://127.0.0.1:50051 \ - --remote_executor=grpc://127.0.0.1:50051 \ - --remote_default_exec_properties=cpu_count=1 +curl -O \ + https://raw.githubusercontent.com/TraceMachina/nativelink/main/nativelink-config/examples/basic_cas.json + +# See https://github.com/TraceMachina/nativelink/pkgs/container/nativelink +# to find the latest tag +docker run \ + -v $(pwd)/basic_cas.json:/config \ + -p 50051 \ + ghcr.io/tracemachina/nativelink:v0.4.0 \ + config ``` -For Windows PowerShell; +**Windows x86_64** ```powershell -bazel test //... ` - --remote_instance_name=main ` - --remote_cache=grpc://127.0.0.1:50051 ` - --remote_executor=grpc://127.0.0.1:50051 ` - --remote_default_exec_properties=cpu_count=1 -``` -This causes Bazel to run the commands through an all-in-one `CAS`, `scheduler` -and `worker`. - -> [!WARNING] -> If you're using MacOS, encountering errors is anticipated at this stage. Our team is actively working on enhancing support for executing remoteable Bazel builds with MacOS. For now, you can run with [Docker](https://github.com/blakehatch/nativelink/tree/main/deployment-examples/docker-compose) or a Linux virtual machine. If you have any questions, reach out on the [NativeLink](https://join.slack.com/t/nativelink/shared_invite/zt-2forhp5n9-L7dTD21nCSY9_IRteQvZmw) slack. -## How it Works - -This diagram is a high-level overview of the data flow in the NativeLink system. It refers to NativeLink concepts like Scheduler pool, Worker pool, and CAS rather than the cloud concepts like functions, compute nodes, and object storage to which they correspond. - -```mermaid -sequenceDiagram - participant build server (client) - participant scheduler pool - participant worker pool - participant cas - build server (client)->>scheduler pool: queue jobs - scheduler pool->>worker pool: route jobs - worker pool->>cas: upload artifacts - worker pool->>scheduler pool: result download instructions - scheduler pool->>build server (client): result download instructions - cas->>build server (client): service queries - build server (client)->>cas: service queries -``` -## ❄️ Installing with Nix - -**Installation requirements:** - -* Nix with [flakes](https://nixos.wiki/wiki/Flakes) enabled - -This build doesn't require cloning the repository, but you need to provide a -configuration file, for instance the one at [`nativelink-config/examples/basic_cas.json`](./nativelink-config/examples/basic_cas.json).
- -The following command builds and runs NativeLink in release (optimized) mode: - -```sh -nix run github:TraceMachina/nativelink ./basic_cas.json +# Download the configuration file +Invoke-WebRequest ` + -Uri "https://raw.githubusercontent.com/TraceMachina/nativelink/main/nativelink-config/examples/basic_cas.json" ` + -OutFile "basic_cas.json" + +# Run the Docker container +# Note: Adjust the path if the script is not run from the directory containing basic_cas.json +docker run ` + -v ${PWD}/basic_cas.json:/config ` + -p 50051 ` + ghcr.io/tracemachina/nativelink:v0.4.0 ` + config ``` -For use in production pin the executable to a specific revision: - -```sh -nix run github:TraceMachina/nativelink/ ./basic_cas.json -``` - -## 🌱 Building with Bazel - -**Build requirements:** +### ❄️ Raw executable with Nix -* Bazel `7.0.2` -* A recent C++ toolchain with LLD as linker +Slower, since it's built from source, but more flexible and supports MacOS. +Doesn't support native Windows, but works in WSL2. -> [!TIP] -> This build supports Nix/direnv which provides Bazel but no C++ toolchain -> (yet). +Make sure your Nix version is recent and supports flakes. For instance, install +it via the [next-gen nix installer](https://github.com/NixOS/experimental-nix-installer). -The following commands places an executable in `./bazel-bin/nativelink` and -starts the service: +> [!CAUTION] +> Executables built for MacOS are dynamically linked against libraries from Nix +> and won't work on systems that don't have these libraries present. -```sh -# Unoptimized development build on Unix -bazel run nativelink -- $(pwd)/nativelink-config/examples/basic_cas.json +**Linux, MacOS, WSL2** -# Optimized release build on Unix -bazel run -c opt nativelink -- $(pwd)/nativelink-config/examples/basic_cas.json - -# Unoptimized development build on Windows -bazel run --config=windows nativelink -- $(pwd)/nativelink-config/examples/basic_cas.json - -# Optimized release build on Windows -bazel run --config=windows -c opt nativelink -- $(pwd)/nativelink-config/examples/basic_cas.json ``` + curl -O \ + https://raw.githubusercontent.com/TraceMachina/nativelink/main/nativelink-config/examples/basic_cas.json -> [!WARNING] -> The Rust compiler `rustc` generates numerous artifacts during compilation, -> including dependencies, macros, and intermediate files. -> When compiling programs from source, be mindful of the associated files' -> impact on your disk usage in the `bazel-bin/` directory. -> This directory can grow substantially in size. -> -> If the facing issues due to this, run the following command -> to clear cache files: -> ```sh -> bazel clean --expunge -> ``` - -## 📦 Building with Cargo - -**Build requirements:** - -* Cargo 1.74.0+ -* A recent C++ toolchain with LLD as linker - -> [!TIP] -> This build supports Nix/direnv which provides Cargo but no C++ -> toolchain/stdenv (yet). - -```bash -# Unoptimized development build -cargo run --bin nativelink -- ./nativelink-config/examples/basic_cas.json - -# Optimized release build -cargo run --release --bin nativelink -- ./nativelink-config/examples/basic_cas.json +nix run github:TraceMachina/nativelink ./basic_cas.json ``` -> [!WARNING] -> The Rust compiler `rustc` generates numerous artifacts during compilation, -> including dependencies, macros, and intermediate files. -> When compiling programs from source, be mindful of the associated files' -> impact on your disk usage in the target/ directory. -> This directory can grow substantially in size.
-> -> If the facing issues due to this, run the following command -> to clear cache files: -> ```sh -> cargo clean -> ``` - -## 🚀 Example Deployments - -You can find a few example deployments in the [deployment-examples directory](./deployment-examples). - -## 🏺 History - -This project was first created due to frustration with similar projects not -working or being extremely inefficient. Rust was chosen as the language to write -it in because at the time Rust was going through a revolution in the new-ish -feature `async-await`. This made making multi-threading simpler when -paired with a runtime like [Tokio](https://github.com/tokio-rs/tokio) while -still giving all the lifetime and other protections that Rust gives. This pretty -much guarantees that we will never have crashes due to race conditions. This -kind of project seemed perfect, since there is so much asynchronous activity -happening and running them on different threads is most preferable. Other -languages like `Go` are good candidates, but other similar projects rely heavily -on channels and mutex locks which are cumbersome and have to be carefully -designed by the developer. Rust doesn't have these issues, since the compiler -will always tell you when the code you are writing might introduce undefined -behavior. The last major reason is because Rust is extremely fast and has no -garbage collection (like C++, but unlike `Java`, `Go`, or `Typescript`). +See the [contribution docs](https://docs.nativelink.com/contribute/nix) for +further information. ## 📜 License diff --git a/deployment-examples/chromium/01_operations.sh b/deployment-examples/chromium/01_operations.sh index cd57c146f..b11905f6b 100755 --- a/deployment-examples/chromium/01_operations.sh +++ b/deployment-examples/chromium/01_operations.sh @@ -6,17 +6,17 @@ set -xeuo pipefail curl -v \ -H 'content-Type: application/json' \ - -d '{"flakeOutput": "./src_root#image"}' \ + -d '{"metadata": {"flakeOutput": "./src_root#image"}}' \ localhost:8082/eventlistener curl -v \ -H 'content-Type: application/json' \ - -d '{"flakeOutput": "./src_root#nativelink-worker-init"}' \ + -d '{"metadata": {"flakeOutput": "./src_root#nativelink-worker-init"}}' \ localhost:8082/eventlistener curl -v \ -H 'content-Type: application/json' \ - -d '{"flakeOutput": "./src_root#nativelink-worker-siso-chromium"}' \ + -d '{"metadata": {"flakeOutput": "./src_root#nativelink-worker-siso-chromium"}}' \ localhost:8082/eventlistener until kubectl get pipelinerun \ diff --git a/deployment-examples/kubernetes/01_operations.sh b/deployment-examples/kubernetes/01_operations.sh index 6265ea11d..360945c36 100755 --- a/deployment-examples/kubernetes/01_operations.sh +++ b/deployment-examples/kubernetes/01_operations.sh @@ -6,17 +6,17 @@ set -xeuo pipefail curl -v \ -H 'content-Type: application/json' \ - -d '{"flakeOutput": "./src_root#image"}' \ + -d '{"metadata": {"flakeOutput": "./src_root#image"}}' \ localhost:8082/eventlistener curl -v \ -H 'content-Type: application/json' \ - -d '{"flakeOutput": "./src_root#nativelink-worker-init"}' \ + -d '{"metadata": {"flakeOutput": "./src_root#nativelink-worker-init"}}' \ localhost:8082/eventlistener curl -v \ -H 'content-Type: application/json' \ - -d '{"flakeOutput": "./src_root#nativelink-worker-lre-cc"}' \ + -d '{"metadata": {"flakeOutput": "./src_root#nativelink-worker-lre-cc"}}' \ localhost:8082/eventlistener until kubectl get pipelinerun \ diff --git a/docs/.gitignore b/docs/.gitignore index 1c24988f1..51fc4a26f 100644 --- a/docs/.gitignore +++
b/docs/.gitignore @@ -27,12 +27,13 @@ pnpm-debug.log* stats.html # Generated files -src/content/docs/guides/contributing.mdx +src/content/docs/contribute/docs.mdx +src/content/docs/contribute/guidelines.mdx src/content/docs/explanations/lre.mdx -src/content/docs/reference/changelog.mdx -src/content/docs/reference/nativelink-config.mdx -src/content/docs/tutorials/setup.mdx src/content/docs/guides/chromium.mdx src/content/docs/guides/configuration.mdx src/content/docs/guides/kubernetes.mdx src/content/docs/guides/setup.md +src/content/docs/reference/changelog.mdx +src/content/docs/reference/nativelink-config.mdx +src/content/docs/tutorials/setup.mdx diff --git a/docs/README.md b/docs/README.md index 202f1a89b..ddd63e220 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,9 +1,9 @@ # The NativeLink documentation -This is the NativeLink documentation hosted at <https://docs.nativelink.com>. +The NativeLink documentation gets deployed to <https://docs.nativelink.com>. -> [!WARNING] -> Setup for working on these docs differs substantially between Linux and Mac. +> [!CAUTION] +> Setup for working on these docs differs between Linux and Mac. > > For Linux: Use the Nix flake and run `pnpm i`. > @@ -11,7 +11,7 @@ This is the NativeLink documentation hosted at <https://docs.nativelink.com>. > revoke`. Then manually install `pnpm`, run `pnpm i` and run `pnpm exec > playwright install`. > -> Long term we'll add the automated setup to Mac. +> It's a long term goal to add the automated setup to Mac. ## 📚 Stack @@ -32,7 +32,8 @@ challenging. Feel free to copy-paste it into your own projects. ## 🚀 Common workflows -See `package.json` for build scripts. +See [`docs/package.json`](https://github.com/TraceMachina/nativelink/blob/main/docs/package.json) +for build scripts. This project requires `pnpm`. The nix flake ships a compatible version. @@ -65,12 +66,12 @@ pnpm preview When deploying to Cloudflare, make sure to set the `PNPM_VERSION` to `8.15.5` to stay in sync with the flake. Also, use `pnpm exec playwright install && pnpm -build` on the Cloudflare worker. This sets up headless Chromium which is used to +build` on the Cloudflare worker. This sets up headless Chromium to generate mermaid diagrams during the build. You don't need to set playwright up locally as it's already configured in the flake. ## 🐛 Known issues -- We use Bun as internal TypeScript processor, but can't use it as bundler yet. -- `"@playform/compress": "=0.0.12"` because `0.0.13` doesn't properly compress - CSS. +- The build process uses Bun as an internal TypeScript processor, but can't use it + as a bundler yet. +- `"@playform/compress": "=0.0.12"` because `0.0.13` doesn't compress CSS. diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs index faf7cc850..505ed3d20 100644 --- a/docs/astro.config.mjs +++ b/docs/astro.config.mjs @@ -117,10 +117,6 @@ export default defineConfig({ label: "Kubernetes example", link: "/guides/kubernetes", }, - { - label: "Contributing", - link: "/guides/contributing", - }, ], }, { @@ -129,6 +125,10 @@ export default defineConfig({ // explain design decisions, constraints, etc.
label: "Understanding NativeLink", items: [ + { + label: "Architecture", + link: "/explanations/architecture/", + }, { label: "Local Remote Execution", link: "/explanations/lre/", @@ -139,16 +139,37 @@ export default defineConfig({ }, ], }, + { + label: "For Contributors", + items: [ + { + label: "Contribution Guidelines", + link: "contribute/guidelines/", + }, + { + label: "Working on documentation", + link: "contribute/docs/", + }, + { + label: "Develop with Nix", + link: "contribute/nix/", + }, + { + label: "Develop with Bazel", + link: "contribute/bazel/", + }, + { + label: "Developing with Cargo", + link: "contribute/cargo/", + }, + ], + }, { // Corresponds to https://diataxis.fr/reference/. Technical // descriptions with the intent to be used as consulting material. // Mostly autogenerated to stay in sync with the codebase. label: "Reference", items: [ - { - label: "Configuration Reference", - link: "/reference/nativelink-config/", - }, { label: "Glossary", link: "/reference/glossary/", diff --git a/docs/scripts/md_to_mdx.ts b/docs/scripts/md_to_mdx.ts index 640c621c4..13cbfed9e 100644 --- a/docs/scripts/md_to_mdx.ts +++ b/docs/scripts/md_to_mdx.ts @@ -192,13 +192,18 @@ export function preProcessMarkdown(markdown: string): string { if ( line.trim().startsWith(">") || - /^\[!(TIP|NOTE|WARNING|IMPORTANT)\]/.test(line) + /^\[!(TIP|NOTE|WARNING|IMPORTANT|CAUTION)\]/.test(line) ) { processedLines.push(line); continue; } - processedLines.push(line.replace(//g, ">")); + const htmlTagPattern = /^[<\s][^>]*>/g; + if (htmlTagPattern.test(line)) { + processedLines.push(line); + } else { + processedLines.push(line.replace(//g, ">")); + } } return processedLines.join("\n"); diff --git a/docs/scripts/md_to_mdx_aot.ts b/docs/scripts/md_to_mdx_aot.ts index 613091d81..39e6017df 100644 --- a/docs/scripts/md_to_mdx_aot.ts +++ b/docs/scripts/md_to_mdx_aot.ts @@ -50,9 +50,14 @@ convertMarkdownToMdx( ); convertMarkdownToMdx( "../CONTRIBUTING.md", - "src/content/docs/guides/contributing.mdx", + "src/content/docs/contribute/guidelines.mdx", "NativeLink contribution guidelines", ); +convertMarkdownToMdx( + "README.md", + "src/content/docs/contribute/docs.mdx", + "Working on documentation", +); convertMarkdownToMdx( "../nativelink-config/README.md", "src/content/docs/guides/configuration.mdx", diff --git a/docs/scripts/metaphase_aot.ts b/docs/scripts/metaphase_aot.ts index 84d882e30..4178790ca 100644 --- a/docs/scripts/metaphase_aot.ts +++ b/docs/scripts/metaphase_aot.ts @@ -1,32 +1 @@ -// biome-ignore lint/correctness/noNodejsModules: Always runs ahead of time. 
-import { join } from "node:path"; -import { generateAstroContent } from "./metaphase"; -import type { Crate } from "./rustdoc_types"; - -export async function generateDocs(config: { - crateDataPath: string; - outputPath: string; -}) { - try { - const crateDataPath = join(import.meta.dir, config.crateDataPath); - const crateData: Crate = JSON.parse(await Bun.file(crateDataPath).text()); - - const markdownContent = generateAstroContent(crateData); - - const outputPath = join(import.meta.dir, config.outputPath); - await Bun.write(outputPath, markdownContent); - - console.info(`Generated: ${outputPath}`); - } catch (error) { - console.error("An error occurred during generation:", error); - } -} - -// Only run if this file is being executed directly -if (import.meta.main) { - await generateDocs({ - crateDataPath: - "../../bazel-bin/nativelink-config/docs_json.rustdoc/nativelink_config.json", - outputPath: "../src/content/docs/reference/nativelink-config.mdx", - }); -} +// TODO: Re-enable when docs are stable diff --git a/docs/src/content/docs/contribute/bazel.mdx b/docs/src/content/docs/contribute/bazel.mdx new file mode 100644 index 000000000..6b7782536 --- /dev/null +++ b/docs/src/content/docs/contribute/bazel.mdx @@ -0,0 +1,83 @@ +--- +title: Building NativeLink with Bazel +description: Instructions on building NativeLink with Bazel. +--- + +These instructions contain information on how to work with NativeLink for Bazel +users. + +If you're using the Nix flake you're all set. + +If you're running outside of nix, install [bazelisk](https://github.com/bazelbuild/bazelisk/tree/master) +manually and make sure you have a recent functional C++ toolchain with LLD as +the linker. + +## Build + +The following commands place an executable in `./bazel-bin/nativelink` and +start the service: + +import { Tabs, TabItem } from '@astrojs/starlight/components'; + +<Tabs> + <TabItem label="Unix"> + ```sh + bazel run nativelink -- \ + $(pwd)/nativelink-config/examples/basic_cas.json + ``` + </TabItem> + <TabItem label="Windows"> + ```sh + bazel run --config=windows nativelink -- \ + $(pwd)/nativelink-config/examples/basic_cas.json + ``` + </TabItem> +</Tabs> + +For optimized builds: + +<Tabs> + <TabItem label="Unix"> + ```sh + bazel run -c opt nativelink -- \ + $(pwd)/nativelink-config/examples/basic_cas.json + ``` + </TabItem> + <TabItem label="Windows"> + ```sh + bazel run --config=windows -c opt nativelink -- \ + $(pwd)/nativelink-config/examples/basic_cas.json + ``` + </TabItem> +</Tabs> + +:::caution +The Rust compiler `rustc` generates large artifacts during compilation, +including dependencies, macros, and intermediate files. When compiling programs +from source, be mindful of the associated files' impact on your disk usage in +the `bazel-bin/` directory. + +If you're facing issues due to this, run the following command to clear cache +files: + +```sh +bazel clean --expunge +``` +::: + +## Test + +To run tests with Bazel: + +<Tabs> + <TabItem label="Unix"> + ```sh + bazel test //... --verbose_failures + ``` + </TabItem> + <TabItem label="Windows"> + ```sh + bazel test --config=windows //... --verbose_failures + ``` + </TabItem> +</Tabs> diff --git a/docs/src/content/docs/contribute/cargo.mdx b/docs/src/content/docs/contribute/cargo.mdx new file mode 100644 index 000000000..99173d93a --- /dev/null +++ b/docs/src/content/docs/contribute/cargo.mdx @@ -0,0 +1,78 @@ +--- +title: Building NativeLink with Cargo +description: Instructions on building NativeLink with Cargo. +--- + +These instructions contain information on how to work with NativeLink for Cargo +users. + +If you're using the Nix flake you're all set. + +If you're running outside of nix, install Cargo via [`rustup`](https://www.rust-lang.org/tools/install).
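With the toolchain in place, a quick smoke test is to build and start the service in one step. This is a sketch that reuses the development invocation from the pre-rewrite README and assumes it runs from the repository root; the build and test recipes below go into more detail:

```sh
# Build a development binary and start NativeLink with the example
# configuration (same invocation the old README used).
cargo run --bin nativelink -- ./nativelink-config/examples/basic_cas.json
```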
+ +## Build + +To build the `nativelink` executable: + +import { Tabs, TabItem } from '@astrojs/starlight/components'; + +<Tabs> + <TabItem label="smol"> + ```sh + cargo build --bin nativelink --profile=smol + ``` + </TabItem> + <TabItem label="debug"> + ```sh + cargo build --bin nativelink + ``` + </TabItem> + <TabItem label="release"> + ```sh + cargo build --bin nativelink --release + ``` + </TabItem> +</Tabs> + +:::danger +These builds, including the optimized build, are **development** builds. They're +non-hermetic and have unpredictable memory allocation behavior. + +Use the Nix build and container images in production. +::: + +:::note +The Rust compiler `rustc` generates large artifacts during compilation, +including dependencies, macros, and intermediate files. When compiling programs +from source, be mindful of the associated files' impact on your disk usage in +the target/ directory. + +If you're facing issues due to this, run the following command to clear cache +files: + +```sh +cargo clean +``` +::: + +## Test + +To run the tests: + +<Tabs> + <TabItem label="smol"> + ```sh + cargo test --all --profile=smol + ``` + </TabItem> + <TabItem label="debug"> + ```sh + cargo test --all + ``` + </TabItem> + <TabItem label="release"> + ```sh + cargo test --all --release + ``` + </TabItem> +</Tabs> diff --git a/docs/src/content/docs/contribute/nix.mdx b/docs/src/content/docs/contribute/nix.mdx new file mode 100644 index 000000000..72245acfb --- /dev/null +++ b/docs/src/content/docs/contribute/nix.mdx @@ -0,0 +1,51 @@ +--- +title: Building NativeLink with Nix +description: Instructions on building NativeLink with Nix. +--- + +These instructions contain information on how to work with NativeLink for Nix +users. + +You'll need a recent version of Nix with flakes enabled, for instance installed +via the [next-gen nix installer](https://github.com/NixOS/experimental-nix-installer). + +This build doesn't require cloning the repository, but you need to provide a +configuration file, for instance the one at [`nativelink-config/examples/basic_cas.json`](https://github.com/TraceMachina/nativelink/blob/main/nativelink-config/examples/basic_cas.json). + +The following command builds and runs NativeLink in release (optimized) mode: + +```sh +nix run github:TraceMachina/nativelink ./basic_cas.json +``` + +You can build a specific PR or branch like so: + +```sh +nix run github:TraceMachina/nativelink?ref=pull//head +``` + +## Working with container images + +View the flake outputs to list available images: + +```sh +nix flake show +``` + +The main `nativelink` container image is the `image` output. Other images follow +the pattern `nativelink-worker-*`. + +Images use [nix2container](https://github.com/nlewo/nix2container) as the builder +and get tagged with nix derivation hashes. + +To build an image locally and make it available to your container runtime: + +```sh +nix run github:TraceMachina/nativelink#image.copyToDockerDaemon +``` + +To view the tag of an image: + +```sh +nix eval github:TraceMachina/nativelink#image.imageTag --raw +``` diff --git a/docs/src/content/docs/explanations/architecture.mdx b/docs/src/content/docs/explanations/architecture.mdx new file mode 100644 index 000000000..f999d02d9 --- /dev/null +++ b/docs/src/content/docs/explanations/architecture.mdx @@ -0,0 +1,33 @@ +--- +title: 'Architecture' +description: "An overview of NativeLink's architecture." +--- + +This diagram presents a high-level overview of data flow in NativeLink.
+ +```mermaid +sequenceDiagram + participant build server (client) + participant scheduler pool + participant worker pool + participant cas + build server (client)->>scheduler pool: queue jobs + scheduler pool->>worker pool: route jobs + worker pool->>cas: upload artifacts + worker pool->>scheduler pool: result download instructions + scheduler pool->>build server (client): result download instructions + cas->>build server (client): service queries + build server (client)->>cas: service queries +``` + +1. The `client`, such as Bazel, Buck2, or Reclient, creates a job and sends it to + the `scheduler`'s job queue. +2. The `scheduler` finds a suitable worker in the worker pool and routes the job + to it. +3. The `worker` runs the job, sending output artifacts to the `cas`. +4. The `worker` provides download instructions for the artifact to the + `scheduler`. +5. The `scheduler` forwards the download instructions to the `client`. + +In conclusion, the client creates a job, sends it to NativeLink, and gets a +response that lets it download the built artifact. diff --git a/docs/src/content/docs/explanations/history.mdx b/docs/src/content/docs/explanations/history.mdx index c73f8a412..d58fad79b 100644 --- a/docs/src/content/docs/explanations/history.mdx +++ b/docs/src/content/docs/explanations/history.mdx @@ -6,16 +6,15 @@ description: 'What is NativeLink?' This project was first created due to frustration with similar projects not working or being extremely inefficient. Rust was chosen as the language to write it in because at the time Rust was going through a revolution in the -new-ish feature async-await. This made making multi-threading extremely -simple when paired with a runtime like tokio while still giving all the -lifetime and other protections that Rust gives. This pretty much guarantees -that we will never have crashes due to race conditions. This kind of project -seemed perfect, since there is so much asynchronous activity happening and -running them on different threads is most preferable. Other languages like -Go are good candidates, but other similar projects rely heavily on channels -and mutex locks which are cumbersome and have to be carefully designed by -the developer. Rust doesn't have these issues, since the compiler will -always tell you when the code you are writing might introduce undefined -behavior. The last major reason is because Rust is extremely fast, +/- a -few percent of C++ and has no garbage collection (like C++, but unlike Java, -Go, or Typescript). +new-ish feature async-await. This made multi-threading more accessible +when paired with a runtime like Tokio while still giving all the lifetime and +other protections that Rust gives. This pretty much guarantees that we will +never have crashes due to race conditions. This kind of project seemed perfect, +since there is so much asynchronous activity happening and running them on +different threads is most preferable. Other languages like Go are good +candidates, but other similar projects rely heavily on channels and mutex locks +which are cumbersome and have to be carefully designed by the developer. Rust +doesn't have these issues, since the compiler will always tell you when the code +you are writing might introduce undefined behavior. The last major reason is +because Rust is extremely fast, comparable to C++, and has no garbage collection +(like C++, but unlike Java, Go, or Typescript).
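To watch the data flow from the architecture overview above end to end, you can point an RBE client at a running NativeLink instance. A sketch, reusing the flags from the old README's evaluation section and assuming the all-in-one `basic_cas.json` setup listening on `127.0.0.1:50051`:

```sh
# Bazel plays the `client` role from the diagram: it queues jobs on the
# scheduler and exchanges artifacts with the CAS over the same endpoint.
bazel test //... \
  --remote_instance_name=main \
  --remote_cache=grpc://127.0.0.1:50051 \
  --remote_executor=grpc://127.0.0.1:50051 \
  --remote_default_exec_properties=cpu_count=1
```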
diff --git a/docs/src/content/docs/reference/glossary.mdx b/docs/src/content/docs/reference/glossary.mdx index 71b3f67c3..c9fbc1ffb 100644 --- a/docs/src/content/docs/reference/glossary.mdx +++ b/docs/src/content/docs/reference/glossary.mdx @@ -33,33 +33,33 @@ interacts with the NativeLink's server to leverage features such as caching Content Addressable Storage (CAS) is a storage system in which data is identified and accessed based on its content rather than its location. Each piece of data (artifact) is hashed, generating a unique identifier (digest) -based on the data's content. In NativeLink, the CAS stores the binary artifacts +based on the data content. In NativeLink, the CAS stores the binary artifacts resulting from build actions. It stores identical data pieces only once and reuses the data pieces as needed. ## Reclient Reclient is an open source build tool created by Google as the successor to -Goma. In terms of NativeLink, it is a client that interacts with NativeLink's +Goma. In terms of NativeLink, it's a client that interacts with NativeLink's server to leverage features such as caching (CAS), distributed execution, and -artifact management. It is mostly used to compile and build Chromium, an open +artifact management. It's mostly used to compile and build Chromium, an open source project behind the Google Chrome browser. ## Hermeticity For build systems, hermeticity refers to the property of creating isolated and self-contained environments for building and testing software. A hermetic build -environment ensures that builds are not influenced by the external state, such +environment ensures that builds aren't influenced by the external state, such as your local environment or variations in system dependencies. This isolation is achieved by using precise, versioned dependencies and fully specifying the build configuration. In NativeLink, hermeticity is achieved using Nix. -## Mono Repo +## Monorepo -A mono repository, also referred to as a mono repo, contains the source code for -multiple projects, often across different domains or areas of a single +A monolithic repository, also referred to as a monorepo, contains the source +code for multiple projects, often across different domains or areas of a single organization. They require unique tooling to handle multiple languages and -technologies within the repo. A good example of a mono repo is the Chromium +technologies within the repository. A good example of a monorepo is the Chromium project. ## NativeLink @@ -74,9 +74,9 @@ usage and reduce build times. Nix is an open source tool that builds packages in isolation from each other. It's how builds remain hermetic in NativeLink. Nix ensures that builds are -reproducible and don't have undeclared dependencies, makes it easy to share -development and build environments for projects, and ensures that installing or -upgrading packages cannot break other packages. +reproducible and don't have undeclared dependencies, lets you share development +and build environments for projects, and ensures that installing or upgrading +packages can't break other packages. ## Remote caching @@ -89,12 +89,12 @@ times and improves compute resource usage by avoiding redundant computations. Remote execution refers to the process of running computational tasks, such as building, compiling, and testing code, on remote servers rather than on a local machine. By using remote servers, tasks can be distributed across multiple -servers (i.e., parallel processing). 
This can speed up build and test processes -faster than local machines. +servers (that is, parallel processing). This can make build and test +processes faster than on local machines. ## Remote Build Execution -The Remote Execution Protocol is a set of protobol buffers (As of V2) which act +The Remote Execution Protocol is a set of protocol buffers (as of V2) which act as standardized guidelines and API specifications that enable clients (such as build systems like Bazel) to distribute build and test action across multiple remote machines. This protocol facilitates the interaction between clients and @@ -109,7 +109,7 @@ source code is written in Rust. ## Scheduler -The scheduler is a core component in NativeLink. It is responsible for managing +The scheduler is a core component in NativeLink. It's responsible for managing and coordinating the execution of tasks on remote workers based on resource availability and dependencies. It leverages the DAG representation of task dependencies to ensure tasks are executed in the correct order and optimizes the @@ -121,12 +121,12 @@ A simulation is a computer-based model to replicate the behavior and interactions of robots within a virtual environment. People can test an analyze robotic systems without the need of physical robots in real-world environments. NativeLink executes high-fidelity simulations through its advanced caching -system, distributed execution of design layouts (with Verilog & VHDL), and +system, distributed execution of design layouts (such as Verilog & VHDL), and continuous, real-time monitoring to detect anomalies. ## Workers -The workers are one of the core components of NativeLink. They are responsible +The workers are one of the core components of NativeLink. They're responsible for executing the build tasks assigned by the scheduler and create the build artifacts. The worker pool can consist of multiple workers running on powerful remote machines.
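The content-addressing idea from the CAS and remote-caching entries above can be illustrated with any hash tool: the storage key is derived from the bytes themselves, so identical artifacts map to a single entry no matter where they come from. A minimal sketch; NativeLink's real digests follow the Remote Execution protocol rather than this exact command:

```sh
# Two files with identical content yield the same digest, so a CAS
# would store the data only once.
printf 'compiled artifact bytes' > a.o
printf 'compiled artifact bytes' > b.o
sha256sum a.o b.o
```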
diff --git a/native-cli/clusters/localcluster.go b/native-cli/clusters/localcluster.go index dc7b25323..8b63ad47f 100644 --- a/native-cli/clusters/localcluster.go +++ b/native-cli/clusters/localcluster.go @@ -10,7 +10,7 @@ import ( "runtime" "text/template" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" git "github.com/go-git/go-git/v5" "sigs.k8s.io/kind/pkg/cluster" @@ -210,7 +210,7 @@ func createRegistryConfigInNode( ) error { config := fmt.Sprintf("[host.\"http://%s:%d\"]", regName, internalPort) regDir := fmt.Sprintf("/etc/containerd/certs.d/localhost:%d", externalPort) - execConfig := types.ExecConfig{ + execConfig := container.ExecOptions{ Cmd: []string{ "sh", "-c", @@ -232,7 +232,7 @@ func createRegistryConfigInNode( ) } - if err := cli.ContainerExecStart(ctx, execID.ID, types.ExecStartCheck{}); err != nil { + if err := cli.ContainerExecStart(ctx, execID.ID, container.ExecAttachOptions{}); err != nil { return fmt.Errorf( "error starting exec command on node %s: %w", nodeName, diff --git a/native-cli/components/cilium.go b/native-cli/components/cilium.go index 31e61e1aa..feb3b7bd7 100644 --- a/native-cli/components/cilium.go +++ b/native-cli/components/cilium.go @@ -8,7 +8,7 @@ import ( "slices" "strings" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/apiextensions" helmv3 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3" @@ -134,7 +134,7 @@ func kindIPv4Subnet() (string, error) { return "", fmt.Errorf("%w: %w", errPulumi, err) } - networks, err := cli.NetworkList(dockerCtx, types.NetworkListOptions{}) + networks, err := cli.NetworkList(dockerCtx, network.ListOptions{}) if err != nil { return "", fmt.Errorf("%w: %w", errPulumi, err) } diff --git a/native-cli/components/embedded/capacitor.yaml b/native-cli/components/embedded/capacitor.yaml new file mode 100644 index 000000000..e9de00914 --- /dev/null +++ b/native-cli/components/embedded/capacitor.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: capacitor + namespace: flux-system +spec: + interval: 12h + url: oci://ghcr.io/gimlet-io/capacitor-manifests + ref: + semver: ">=0.1.0" +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: capacitor + namespace: flux-system +spec: + targetNamespace: flux-system + interval: 1h + retryInterval: 2m + timeout: 5m + wait: true + prune: true + path: "./" + sourceRef: + kind: OCIRepository + name: capacitor +--- +apiVersion: "cilium.io/v2" +kind: CiliumNetworkPolicy +metadata: + name: allow-ingress-to-capacitor + namespace: flux-system +spec: + endpointSelector: + matchLabels: + app.kubernetes.io/name: onechart + app.kubernetes.io/instance: capacitor + ingress: + - fromEntities: + - ingress + - toPorts: + - ports: + - port: "9000" + protocol: TCP diff --git a/native-cli/components/embedded/envoy.template.yaml b/native-cli/components/embedded/envoy.template.yaml index 4dbe7051d..443826555 100644 --- a/native-cli/components/embedded/envoy.template.yaml +++ b/native-cli/components/embedded/envoy.template.yaml @@ -32,6 +32,8 @@ static_resources: {{- end }} http_filters: - name: envoy.filters.http.router + upgrade_configs: + - upgrade_type: websocket {{- end }} clusters: {{- range .InternalGateways }} diff --git a/native-cli/components/embedded/kustomization.yaml 
b/native-cli/components/embedded/kustomization.yaml index 945ec1d9e..ee9687080 100644 --- a/native-cli/components/embedded/kustomization.yaml +++ b/native-cli/components/embedded/kustomization.yaml @@ -7,5 +7,7 @@ resources: - skopeo-check-hashlocked-url.yaml - nix2container-image-info.yaml - trigger.yaml + - update-image-tags.yaml + - capacitor.yaml # - nativelink-gateways.yaml # Gateways are handled in Pulumi via the # NativeLinkGateways resource. diff --git a/native-cli/components/embedded/nativelink-gateways.yaml b/native-cli/components/embedded/nativelink-gateways.yaml index a9ba34bf1..960289ad0 100644 --- a/native-cli/components/embedded/nativelink-gateways.yaml +++ b/native-cli/components/embedded/nativelink-gateways.yaml @@ -57,3 +57,15 @@ spec: - name: tkn-gateway protocol: HTTP port: 80 +--- +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: Gateway +metadata: + name: capacitor-gateway + namespace: flux-system +spec: + gatewayClassName: cilium + listeners: + - name: capacitor-gateway + protocol: HTTP + port: 80 diff --git a/native-cli/components/embedded/nativelink-routes.yaml b/native-cli/components/embedded/nativelink-routes.yaml index 0dbc8e8e0..192b098ed 100644 --- a/native-cli/components/embedded/nativelink-routes.yaml +++ b/native-cli/components/embedded/nativelink-routes.yaml @@ -49,3 +49,20 @@ spec: backendRefs: - name: tekton-dashboard port: 9097 +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: capacitor-route + namespace: flux-system +spec: + parentRefs: + - sectionName: capacitor-gateway + name: capacitor-gateway + rules: + - matches: + - path: + value: / + backendRefs: + - name: capacitor + port: 9000 diff --git a/native-cli/components/embedded/rebuild-nativelink.yaml b/native-cli/components/embedded/rebuild-nativelink.yaml index 4259377d7..f1435bdaa 100644 --- a/native-cli/components/embedded/rebuild-nativelink.yaml +++ b/native-cli/components/embedded/rebuild-nativelink.yaml @@ -1,5 +1,5 @@ --- -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: Pipeline metadata: name: rebuild-nativelink @@ -163,3 +163,15 @@ spec: - input: "$(tasks.check-hashlocked-url.results.exists)" operator: notin values: ["true"] + + - name: update-image-tags + taskRef: + name: update-image-tags + params: + - name: imageName + value: "$(tasks.get-image-info.results.imageName)" + - name: imageTag + value: "$(tasks.get-image-info.results.imageTag)" + runAfter: + - copy-verified-prebuilt-image + - copy-nix-built-image diff --git a/native-cli/components/embedded/trigger.yaml b/native-cli/components/embedded/trigger.yaml index 1aaea3b57..9fa64dd04 100644 --- a/native-cli/components/embedded/trigger.yaml +++ b/native-cli/components/embedded/trigger.yaml @@ -1,4 +1,34 @@ --- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: configmap-updater + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: configmap-updater + namespace: default +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: configmap-updater-binding + namespace: default +subjects: + - kind: ServiceAccount + name: configmap-updater + namespace: default +roleRef: + kind: Role + name: configmap-updater + apiGroup: rbac.authorization.k8s.io +--- apiVersion: triggers.tekton.dev/v1beta1 kind: TriggerTemplate metadata: @@ -29,6 +59,9 @@ spec: metadata: generateName: rebuild-nativelink-run- spec: + taskRunSpecs: + - 
pipelineTaskName: update-image-tags + taskServiceAccountName: configmap-updater pipelineRef: name: rebuild-nativelink workspaces: @@ -57,7 +90,7 @@ metadata: spec: params: - name: flakeOutput - value: "$(body.flakeOutput)" + value: "$(body.metadata.flakeOutput)" --- apiVersion: v1 kind: ServiceAccount diff --git a/native-cli/components/embedded/update-image-tags.yaml b/native-cli/components/embedded/update-image-tags.yaml new file mode 100644 index 000000000..7922686e9 --- /dev/null +++ b/native-cli/components/embedded/update-image-tags.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: update-image-tags + labels: + app.kubernetes.io/versions: "0.1" +spec: + description: > + Update or add image tags in a ConfigMap. + + Note: This task requires running under a ServiceAccount that has permissions + to "get", "patch" and "create" configmaps. + params: + - name: imageName + - name: imageTag + steps: + - name: update-configmap + image: bitnami/kubectl + env: + - name: CM_NAME + value: "nativelink-image-tags" + script: | + #!/bin/bash + set -e + + # Transform the key to a format that flux accepts. The transformation + # here looks like `nativelink-worker` -> `NATIVELINK_WORKER_TAG`. + TRANSFORMED_KEY=$(echo "$(params.imageName)" | tr '[:lower:]' '[:upper:]' | tr '-' '_')_TAG + + # Check if the ConfigMap exists + if kubectl get configmap $CM_NAME &>/dev/null; then + # ConfigMap exists, update it + kubectl patch configmap $CM_NAME --type=json -p='[{"op": "add", "path": "/data/'$TRANSFORMED_KEY'", "value": "$(params.imageTag)"}]' + else + # ConfigMap doesn't exist, create it + kubectl create configmap $CM_NAME --from-literal=$TRANSFORMED_KEY=$(params.imageTag) + fi diff --git a/native-cli/components/flux.go b/native-cli/components/flux.go new file mode 100644 index 000000000..d70555b60 --- /dev/null +++ b/native-cli/components/flux.go @@ -0,0 +1,31 @@ +package components + +import ( + "fmt" + + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/yaml" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// The configuration for Flux. +type Flux struct { + Version string +} + +// Install sets up Flux in the cluster. 
+func (component *Flux) Install( + ctx *pulumi.Context, + name string, +) ([]pulumi.Resource, error) { + flux, err := yaml.NewConfigFile(ctx, name, &yaml.ConfigFileArgs{ + File: fmt.Sprintf( + "https://github.com/fluxcd/flux2/releases/download/v%s/install.yaml", + component.Version, + ), + }) + if err != nil { + return nil, fmt.Errorf("%w: %w", errPulumi, err) + } + + return []pulumi.Resource{flux}, nil +} diff --git a/native-cli/components/loadbalancer.go b/native-cli/components/loadbalancer.go index 7118b89a4..61f3a421d 100644 --- a/native-cli/components/loadbalancer.go +++ b/native-cli/components/loadbalancer.go @@ -310,6 +310,7 @@ func (component *Loadbalancer) Install( "el-gateway": false, "hubble-gateway": false, "tkn-gateway": false, + "capacitor-gateway": false, }, ), component.Gateways) if err != nil { diff --git a/native-cli/default.nix b/native-cli/default.nix index ade32b065..3aa0dc6b4 100644 --- a/native-cli/default.nix +++ b/native-cli/default.nix @@ -3,7 +3,7 @@ pkgs.buildGoModule { pname = "native-cli"; version = "0.4.0"; src = ./.; - vendorHash = "sha256-zB+gaJB+5KEnkPHX2BY8nbO/oOmPk4lfmGzdPBMOSxE="; + vendorHash = "sha256-lqOzzt97Xr9tcsAcrIubPtFii85s2m06Hz+cfruHQqQ="; buildInputs = [pkgs.makeWrapper]; ldflags = ["-s -w"]; installPhase = '' diff --git a/native-cli/go.mod b/native-cli/go.mod index d3fa6d457..38659d04e 100644 --- a/native-cli/go.mod +++ b/native-cli/go.mod @@ -3,11 +3,11 @@ module github.com/TraceMachina/nativelink/native-cli go 1.22.1 require ( - github.com/docker/docker v27.0.0+incompatible + github.com/docker/docker v27.0.3+incompatible github.com/go-git/go-git/v5 v5.12.0 github.com/pulumi/pulumi-docker/sdk/v3 v3.6.1 - github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.13.1 - github.com/pulumi/pulumi/sdk/v3 v3.120.0 + github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.14.0 + github.com/pulumi/pulumi/sdk/v3 v3.122.0 github.com/spf13/cobra v1.8.1 k8s.io/apimachinery v0.30.2 k8s.io/client-go v0.30.2 @@ -29,7 +29,7 @@ require ( github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/charmbracelet/bubbles v0.18.0 // indirect - github.com/charmbracelet/bubbletea v0.26.4 // indirect + github.com/charmbracelet/bubbletea v0.26.6 // indirect github.com/charmbracelet/lipgloss v0.11.0 // indirect github.com/charmbracelet/x/ansi v0.1.2 // indirect github.com/charmbracelet/x/input v0.1.2 // indirect @@ -68,7 +68,7 @@ require ( github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/hcl/v2 v2.20.1 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -118,26 +118,26 @@ require ( github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/zclconf/go-cty v1.14.4 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // 
indirect go.opentelemetry.io/otel/sdk v1.25.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect + golang.org/x/crypto v0.25.0 // indirect golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.22.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3 // indirect - google.golang.org/grpc v1.64.0 // indirect + golang.org/x/tools v0.23.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect @@ -146,8 +146,8 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect k8s.io/api v0.30.2 // indirect - k8s.io/klog/v2 v2.130.0 // indirect - k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 // indirect k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect lukechampine.com/frand v1.4.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/native-cli/go.sum b/native-cli/go.sum index b4c54ee57..aa37f3f89 100644 --- a/native-cli/go.sum +++ b/native-cli/go.sum @@ -36,8 +36,8 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw= -github.com/charmbracelet/bubbletea v0.26.4 h1:2gDkkzLZaTjMl/dQBpNVtnvcCxsh/FCkimep7FC9c40= -github.com/charmbracelet/bubbletea v0.26.4/go.mod h1:P+r+RRA5qtI1DOHNFn0otoNwB4rn+zNAzSj/EXz6xU0= +github.com/charmbracelet/bubbletea v0.26.6 h1:zTCWSuST+3yZYZnVSvbXwKOPRSNZceVeqpzOLN2zq1s= +github.com/charmbracelet/bubbletea v0.26.6/go.mod h1:dz8CWPlfCCGLFbBlTY4N7bjLiyOGDJEnd2Muu7pOWhk= github.com/charmbracelet/lipgloss v0.11.0 h1:UoAcbQ6Qml8hDwSWs0Y1cB5TEQuZkDPH/ZqwWWYTG4g= github.com/charmbracelet/lipgloss v0.11.0/go.mod h1:1UdRTH9gYgpcdNN5oBtjbu/IzNKtzVtb7sqN1t9LNn8= github.com/charmbracelet/x/ansi v0.1.2 h1:6+LR39uG8DE6zAmbu023YlqjJHkYXDF1z36ZwzO4xZY= @@ -65,8 +65,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/docker/docker v27.0.0+incompatible h1:JRugTYuelmWlW0M3jakcIadDx2HUoUO6+Tf2C5jVfwA= -github.com/docker/docker v27.0.0+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -146,8 +146,8 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -244,10 +244,10 @@ github.com/pulumi/esc v0.9.1 h1:HH5eEv8sgyxSpY5a8yePyqFXzA8cvBvapfH8457+mIs= github.com/pulumi/esc v0.9.1/go.mod h1:oEJ6bOsjYlQUpjf70GiX+CXn3VBmpwFDxUTlmtUN84c= github.com/pulumi/pulumi-docker/sdk/v3 v3.6.1 h1:plWLn9O6u80Vr37LoCsckyobBfcrdTU9cERor72QjqA= github.com/pulumi/pulumi-docker/sdk/v3 v3.6.1/go.mod h1:N4Yu4c49QErfucPt9Y/fGmpTryRqc0VfhyKHsGR9/g8= -github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.13.1 h1:Fp7siNqQBjwIoY/7Jaml/v1frOyGO+kYeeMrO4d2k7k= -github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.13.1/go.mod h1:MZ+ci9Iq8f0K1aOTXgD3X+ENo2+dFbgQQ7Ahh0YZ8/g= -github.com/pulumi/pulumi/sdk/v3 v3.120.0 h1:KYtMkCmcSg4U+w41/Q0l3llKEodbfdyq6J0VMoEoVmY= -github.com/pulumi/pulumi/sdk/v3 v3.120.0/go.mod h1:/mQJPO+HehhoSJ9O3C6eUKAGeAr+4KSrbDhLsXHKldc= +github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.14.0 h1:y9+1n+Qh37zCronhamSmxa946T0ekM7VaJE1+UARIcM= +github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.14.0/go.mod h1:Smqya4ClBt2okzTDgPkihKu/hHtoZJNkBtUWoHUUCbw= +github.com/pulumi/pulumi/sdk/v3 v3.122.0 h1:rW/RJ1GRelCi/5VY1+7ppqeF0AblWyjyjgNffqw4dc4= +github.com/pulumi/pulumi/sdk/v3 v3.122.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -297,20 +297,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel 
v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo= go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -322,8 +322,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -332,8 +332,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod 
v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -345,8 +345,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -378,15 +378,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -406,19 +406,19 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e h1:xIXmWJ303kJCuogpj0bHq+dcjcZHU+XFyc1I0Yl9cRg= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3 h1:9Xyg6I9IWQZhRVfCWjKK+l6kI0jHcPesVlMnT//aHNo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -446,10 +446,10 @@ k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= -k8s.io/klog/v2 v2.130.0 h1:5nB3+3HpqKqXJIXNtJdtxcDCfaa9KL8StJgMzGJkUkM= -k8s.io/klog/v2 v2.130.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= -k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi 
v0.0.0-20240703190633-0aa61b46e8c2 h1:T5TEV4a+pEjc+j9Xui3MGGeoDLIN6uzZrx8NYotFMgQ=
+k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
 k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
 k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw=
diff --git a/native-cli/programs/local.go b/native-cli/programs/local.go
index 09f2a22ab..fcc89286e 100644
--- a/native-cli/programs/local.go
+++ b/native-cli/programs/local.go
@@ -58,6 +58,16 @@ func ProgramForLocalCluster(ctx *pulumi.Context) error {
         os.Exit(1)
     }
 
+    flux, err := components.AddComponent(
+        ctx,
+        "flux",
+        &components.Flux{Version: "2.3.0"},
+    )
+    if err != nil {
+        log.Println(err)
+        os.Exit(1)
+    }
+
     tektonPipelines, err := components.AddComponent(
         ctx,
         "tekton-pipelines",
@@ -83,6 +93,7 @@ func ProgramForLocalCluster(ctx *pulumi.Context) error {
         "tekton-dashboard",
         &components.TektonDashboard{Version: "0.45.0"},
     ))
+
     components.Check(components.AddComponent(
         ctx,
         "rebuild-nativelink",
@@ -93,6 +104,7 @@ func ProgramForLocalCluster(ctx *pulumi.Context) error {
                 tektonTriggers,
                 localSources,
                 nixStore,
+                flux,
             ),
         },
     ))
@@ -143,6 +155,17 @@ func ProgramForLocalCluster(ctx *pulumi.Context) error {
         },
     }
 
+    capacitorGateway := components.Gateway{
+        ExternalPort: 9000, //nolint:mnd
+        InternalPort: 9000, //nolint:mnd
+        Routes: []components.RouteConfig{
+            {
+                Prefix:  "/",
+                Cluster: "capacitor-gateway",
+            },
+        },
+    }
+
     nativelinkGateway := components.Gateway{
         ExternalPort: 8082, //nolint:mnd
         InternalPort: 8089, //nolint:mnd
@@ -172,6 +195,7 @@ func ProgramForLocalCluster(ctx *pulumi.Context) error {
         "kind-loadbalancer",
         &components.Loadbalancer{
             Gateways: []components.Gateway{
+                capacitorGateway,
                 nativelinkGateway,
                 hubbleGateway,
                 tknGateway,
diff --git a/nativelink-config/examples/filesystem_cas.json b/nativelink-config/examples/filesystem_cas.json
index 00022465a..6f43bf6be 100644
--- a/nativelink-config/examples/filesystem_cas.json
+++ b/nativelink-config/examples/filesystem_cas.json
@@ -76,7 +76,7 @@
         }
       },
       "verify_size": true,
-      "hash_verification_function": "sha256"
+      "verify_hash": true
     }
   },
   "AC_MAIN_STORE": {
diff --git a/nativelink-config/src/cas_server.rs b/nativelink-config/src/cas_server.rs
index 03e2c4535..afc3d37bb 100644
--- a/nativelink-config/src/cas_server.rs
+++ b/nativelink-config/src/cas_server.rs
@@ -71,7 +71,7 @@ pub struct HttpCompressionConfig {
     /// will consume a lot of CPU and add a lot of latency.
     /// see:
     ///
-    /// Defaults: {no supported compression}
+    /// Default: {no supported compression}
     pub accepted_compression_algorithms: Vec,
 }
 
@@ -129,7 +129,7 @@ pub struct ExecutionConfig {
     pub scheduler: SchedulerRefName,
 }
 
-#[derive(Deserialize, Debug)]
+#[derive(Deserialize, Debug, Default)]
 #[serde(deny_unknown_fields)]
 pub struct ByteStreamConfig {
     /// Name of the store in the "stores" configuration.
@@ -139,16 +139,22 @@ pub struct ByteStreamConfig {
     pub cas_stores: HashMap,
 
     /// Max number of bytes to send on each grpc stream chunk.
     /// According to
     /// 16KiB - 64KiB is optimal.
     ///
-    /// Defaults: 64KiB
+    ///
+    /// Default: 64KiB
     #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
     pub max_bytes_per_stream: usize,
 
+    /// Maximum number of bytes to decode for each gRPC message.
+    ///
+    /// Default: 4 MiB
+    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
+    pub max_decoding_message_size: usize,
+
     /// In the event a client disconnects while uploading a blob, we will hold
     /// the internal stream open for this many seconds before closing it.
     /// This allows clients that disconnect to reconnect and continue uploading
     /// the same blob.
     ///
-    /// Defaults: 10 (seconds)
+    /// Default: 10 (seconds)
     #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
     pub persist_stream_on_disconnect_timeout: usize,
 }
diff --git a/nativelink-service/src/bytestream_server.rs b/nativelink-service/src/bytestream_server.rs
index 043b0238b..10c935abe 100644
--- a/nativelink-service/src/bytestream_server.rs
+++ b/nativelink-service/src/bytestream_server.rs
@@ -57,6 +57,9 @@ const DEFAULT_PERSIST_STREAM_ON_DISCONNECT_TIMEOUT: Duration = Duration::from_se
 /// If this value changes update the documentation in the config definition.
 const DEFAULT_MAX_BYTES_PER_STREAM: usize = 64 * 1024;
 
+/// If this value changes update the documentation in the config definition.
+const DEFAULT_MAX_DECODING_MESSAGE_SIZE: usize = 4 * 1024 * 1024;
+
 type ReadStream = Pin> + Send + 'static>>;
 type StoreUpdateFuture = Pin> + Send + 'static>>;
@@ -156,6 +159,7 @@ pub struct ByteStreamServer {
     stores: HashMap,
     // Max number of bytes to send on each grpc stream chunk.
     max_bytes_per_stream: usize,
+    max_decoding_message_size: usize,
     active_uploads: Arc>>,
     sleep_fn: SleepFn,
 }
@@ -191,16 +195,23 @@ impl ByteStreamServer {
         } else {
             config.max_bytes_per_stream
         };
+        let max_decoding_message_size = if config.max_decoding_message_size == 0 {
+            DEFAULT_MAX_DECODING_MESSAGE_SIZE
+        } else {
+            config.max_decoding_message_size
+        };
         Ok(ByteStreamServer {
             stores,
             max_bytes_per_stream,
+            max_decoding_message_size,
             active_uploads: Arc::new(Mutex::new(HashMap::new())),
             sleep_fn,
         })
     }
 
     pub fn into_service(self) -> Server {
-        Server::new(self)
+        let max_decoding_message_size = self.max_decoding_message_size;
+        Server::new(self).max_decoding_message_size(max_decoding_message_size)
     }
 
     fn create_or_join_upload_stream(
diff --git a/nativelink-service/tests/bytestream_server_test.rs b/nativelink-service/tests/bytestream_server_test.rs
index 45aa6304a..844dcac0f 100644
--- a/nativelink-service/tests/bytestream_server_test.rs
+++ b/nativelink-service/tests/bytestream_server_test.rs
@@ -14,12 +14,17 @@
 use std::sync::Arc;
 
-use futures::poll;
+use bytes::Bytes;
 use futures::task::Poll;
+use futures::{poll, Future};
 use hyper::body::Sender;
+use hyper::server::conn::Http;
+use hyper::Uri;
 use maplit::hashmap;
+use nativelink_config::cas_server::ByteStreamConfig;
 use nativelink_error::{make_err, Code, Error, ResultExt};
 use nativelink_macro::nativelink_test;
+use nativelink_proto::google::bytestream::byte_stream_client::ByteStreamClient;
 use nativelink_proto::google::bytestream::byte_stream_server::ByteStream;
 use nativelink_proto::google::bytestream::{
     QueryWriteStatusRequest, QueryWriteStatusResponse, ReadRequest, WriteRequest, WriteResponse,
@@ -28,16 +33,20 @@ use nativelink_service::bytestream_server::ByteStreamServer;
 use nativelink_store::default_store_factory::store_factory;
 use nativelink_store::store_manager::StoreManager;
 use nativelink_util::common::{encode_stream_proto, DigestInfo};
-use nativelink_util::spawn;
 use nativelink_util::store_trait::StoreLike;
 use nativelink_util::task::JoinHandleDropGuard;
+use nativelink_util::{background_spawn, spawn};
 use pretty_assertions::assert_eq;
 use
prometheus_client::registry::Registry; +use tokio::io::DuplexStream; +use tokio::sync::mpsc::unbounded_channel; use tokio::task::yield_now; +use tokio_stream::wrappers::UnboundedReceiverStream; use tokio_stream::StreamExt; use tonic::codec::{Codec, CompressionEncoding, ProstCodec}; -use tonic::transport::Body; +use tonic::transport::{Body, Channel, Endpoint}; use tonic::{Request, Response, Streaming}; +use tower::service_fn; const INSTANCE_NAME: &str = "foo_instance_name"; const HASH1: &str = "0123456789abcdef000000000000000000000000000000000123456789abcdef"; @@ -59,42 +68,116 @@ async fn make_store_manager() -> Result, Error> { Ok(store_manager) } -fn make_bytestream_server(store_manager: &StoreManager) -> Result { - ByteStreamServer::new( - &nativelink_config::cas_server::ByteStreamConfig { - cas_stores: hashmap! { - "foo_instance_name".to_string() => "main_cas".to_string(), - }, - persist_stream_on_disconnect_timeout: 0, - max_bytes_per_stream: 1024, +fn make_bytestream_server( + store_manager: &StoreManager, + config: Option, +) -> Result { + let config = config.unwrap_or(nativelink_config::cas_server::ByteStreamConfig { + cas_stores: hashmap! { + "foo_instance_name".to_string() => "main_cas".to_string(), }, - store_manager, + persist_stream_on_disconnect_timeout: 0, + max_bytes_per_stream: 1024, + max_decoding_message_size: 0, + }); + ByteStreamServer::new(&config, store_manager) +} + +fn make_stream(encoding: Option) -> (Sender, Streaming) { + let (tx, body) = Body::channel(); + let mut codec = ProstCodec::::default(); + let stream = Streaming::new_request(codec.decoder(), body, encoding, None); + (tx, stream) +} + +fn make_stream_and_writer_spawn( + bs_server: Arc, + encoding: Option, +) -> ( + Sender, + JoinHandleDropGuard, tonic::Status>>, +) { + let (tx, stream) = make_stream(encoding); + let join_handle = spawn!("bs_server_write", async move { + bs_server.write(Request::new(stream)).await + },); + (tx, join_handle) +} + +fn make_resource_name(data_len: impl std::fmt::Display) -> String { + format!( + "{}/uploads/{}/blobs/{}/{}", + INSTANCE_NAME, + "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. + HASH1, + data_len, ) } +async fn server_and_client_stub( + bs_server: ByteStreamServer, +) -> (JoinHandleDropGuard<()>, ByteStreamClient) { + let (tx, rx) = unbounded_channel::>(); + let mut rx = UnboundedReceiverStream::new(rx); + + #[derive(Clone)] + struct Executor; + impl hyper::rt::Executor for Executor + where + F: Future + Send + 'static, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + background_spawn!("executor_spawn", fut); + } + } + + let server_spawn = spawn!("grpc_server", async move { + let http = Http::new().with_executor(Executor); + let bs_service = bs_server.into_service(); + + while let Some(stream) = rx.next().await { + let stream = stream.expect("Failed to get stream"); + http.serve_connection(stream, bs_service.clone()) + .await + .expect("Connection failed"); + } + }); + + // Note: This is a dummy address, it will not actually connect to it, + // instead it will be connecting via mpsc. 
+ let channel = Endpoint::try_from("http://[::]:50051") + .unwrap() + .executor(Executor) + .connect_with_connector(service_fn(move |_: Uri| { + let tx = tx.clone(); + async move { + const MAX_BUFFER_SIZE: usize = 4096; + let (client, server) = tokio::io::duplex(MAX_BUFFER_SIZE); + tx.send(Ok(server)).unwrap(); + Result::<_, Error>::Ok(client) + } + })) + .await + .unwrap(); + + let client = ByteStreamClient::new(channel); + + (server_spawn, client) +} + #[nativelink_test] pub async fn chunked_stream_receives_all_data() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); // Setup stream. - let (mut tx, join_handle) = { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!( - "chunked_stream_receives_all_data_write_stream", - async move { - let response_future = bs_server.write(Request::new(stream)); - response_future.await - }, - ); - (tx, join_handle) - }; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); + // Send data. let raw_data = { let raw_data = "12456789abcdefghijk".as_bytes(); @@ -136,7 +219,10 @@ pub async fn chunked_stream_receives_all_data() -> Result<(), Box Result<(), Box Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!("resume_write_success_write_stream", async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server.clone(), Some(CompressionEncoding::Gzip)); const WRITE_DATA: &str = "12456789abcdefghijk"; // Chunk our data into two chunks to simulate something a client @@ -217,15 +282,15 @@ pub async fn resume_write_success() -> Result<(), Box> { write_request.data = WRITE_DATA[..BYTE_SPLIT_OFFSET].into(); tx.send_data(encode_stream_proto(&write_request)?).await?; } - let bs_server = { + { // Now disconnect our stream. drop(tx); - let (result, bs_server) = join_handle.await?; + let result = join_handle.await.expect("Failed to join"); assert_eq!(result.is_err(), true, "Expected error to be returned"); - bs_server - }; + } // Now reconnect. - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); { // Write the remainder of our data. 
write_request.write_offset = BYTE_SPLIT_OFFSET as i64; @@ -236,8 +301,10 @@ pub async fn resume_write_success() -> Result<(), Box> { { // Now disconnect our stream. drop(tx); - let (result, _bs_server) = join_handle.await?; - result?; + join_handle + .await + .expect("Failed to join") + .expect("Failed write"); } { // Check to make sure our store recorded the data properly. @@ -254,34 +321,13 @@ pub async fn resume_write_success() -> Result<(), Box> { #[nativelink_test] pub async fn restart_write_success() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!("restart_write_success_write_stream", async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server.clone(), Some(CompressionEncoding::Gzip)); const WRITE_DATA: &str = "12456789abcdefghijk"; // Chunk our data into two chunks to simulate something a client @@ -307,15 +353,15 @@ pub async fn restart_write_success() -> Result<(), Box> { write_request.data = WRITE_DATA[..BYTE_SPLIT_OFFSET].into(); tx.send_data(encode_stream_proto(&write_request)?).await?; } - let bs_server = { + { // Now disconnect our stream. drop(tx); - let (result, bs_server) = join_handle.await?; + let result = join_handle.await.expect("Failed to join"); assert_eq!(result.is_err(), true, "Expected error to be returned"); - bs_server - }; + } // Now reconnect. - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); { // Write first chunk of data again. write_request.write_offset = 0; @@ -332,7 +378,7 @@ pub async fn restart_write_success() -> Result<(), Box> { { // Now disconnect our stream. drop(tx); - let (result, _bs_server) = join_handle.await?; + let result = join_handle.await.expect("Failed to join"); assert!(result.is_ok(), "Expected success to be returned"); } { @@ -350,37 +396,13 @@ pub async fn restart_write_success() -> Result<(), Box> { #[nativelink_test] pub async fn restart_mid_stream_write_success() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. 
- let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!( - "restart_mid_stream_write_success_write_stream", - async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }, - ); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server.clone(), Some(CompressionEncoding::Gzip)); const WRITE_DATA: &str = "12456789abcdefghijk"; // Chunk our data into two chunks to simulate something a client @@ -406,15 +428,15 @@ pub async fn restart_mid_stream_write_success() -> Result<(), Box Result<(), Box Result<(), Box Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); // Setup stream. - let (mut tx, mut write_fut) = { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - (tx, bs_server.write(Request::new(stream))) - }; + let (mut tx, stream) = make_stream(Some(CompressionEncoding::Gzip)); + let mut write_fut = bs_server.write(Request::new(stream)); + const WRITE_DATA: &str = "12456789abcdefghijk"; - let resource_name = format!( - "{}/uploads/{}/blobs/{}/{}", - INSTANCE_NAME, - "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. - HASH1, - WRITE_DATA.len() - ); + let resource_name = make_resource_name(WRITE_DATA.len()); let mut write_request = WriteRequest { resource_name, write_offset: 0, @@ -534,46 +546,19 @@ pub async fn ensure_write_is_not_done_until_write_request_is_set( #[nativelink_test] pub async fn out_of_order_data_fails() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; - - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!("out_of_order_data_fails_write_stream", async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); + + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); const WRITE_DATA: &str = "12456789abcdefghijk"; // Chunk our data into two chunks to simulate something a client // might do. const BYTE_SPLIT_OFFSET: usize = 8; - let resource_name = format!( - "{}/uploads/{}/blobs/{}/{}", - INSTANCE_NAME, - "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. 
- HASH1, - WRITE_DATA.len() - ); + let resource_name = make_resource_name(WRITE_DATA.len()); let mut write_request = WriteRequest { resource_name, write_offset: 0, @@ -593,7 +578,7 @@ pub async fn out_of_order_data_fails() -> Result<(), Box> tx.send_data(encode_stream_proto(&write_request)?).await?; } assert!( - join_handle.await?.0.is_err(), + join_handle.await.expect("Failed to join").is_err(), "Expected error to be returned" ); { @@ -613,42 +598,15 @@ pub async fn out_of_order_data_fails() -> Result<(), Box> #[nativelink_test] pub async fn upload_zero_byte_chunk() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!("upload_zero_byte_chunk_write_stream", async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); - let resource_name = format!( - "{}/uploads/{}/blobs/{}/{}", - INSTANCE_NAME, - "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. - HASH1, - 0 - ); + let resource_name = make_resource_name(0); let write_request = WriteRequest { resource_name, write_offset: 0, @@ -660,7 +618,10 @@ pub async fn upload_zero_byte_chunk() -> Result<(), Box> // Write our zero byte data. tx.send_data(encode_stream_proto(&write_request)?).await?; // Wait for stream to finish. - join_handle.await?.0?; + join_handle + .await + .expect("Failed to join") + .expect("Failed write"); } { // Check to make sure our store recorded the data properly. @@ -675,41 +636,14 @@ pub async fn upload_zero_byte_chunk() -> Result<(), Box> #[nativelink_test] pub async fn disallow_negative_write_offset() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; - - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!("disallow_negative_write_offset_write_stream", async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; - - let resource_name = format!( - "{}/uploads/{}/blobs/{}/{}", - INSTANCE_NAME, - "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. 
- HASH1, - 0 + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), ); + + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); + + let resource_name = make_resource_name(0); let write_request = WriteRequest { resource_name, write_offset: -1, @@ -721,7 +655,7 @@ pub async fn disallow_negative_write_offset() -> Result<(), Box Result<(), Box Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; - - async fn setup_stream( - bs_server: ByteStreamServer, - ) -> Result< - ( - Sender, - JoinHandleDropGuard<( - Result, tonic::Status>, - ByteStreamServer, - )>, - ), - Error, - > { - let (tx, body) = Body::channel(); - let mut codec = ProstCodec::::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let join_handle = spawn!("out_of_sequence_write_write_stream", async move { - let response_future = bs_server.write(Request::new(stream)); - (response_future.await, bs_server) - }); - Ok((tx, join_handle)) - } - let (mut tx, join_handle) = setup_stream(bs_server).await?; - - let resource_name = format!( - "{}/uploads/{}/blobs/{}/{}", - INSTANCE_NAME, - "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. - HASH1, - 100 + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), ); + + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip)); + + let resource_name = make_resource_name(100); let write_request = WriteRequest { resource_name, write_offset: 10, @@ -775,7 +682,7 @@ pub async fn out_of_sequence_write() -> Result<(), Box> { // Write our zero byte data. tx.send_data(encode_stream_proto(&write_request)?).await?; // Expect the write command to fail. 
- assert!(join_handle.await?.0.is_err()); + assert!(join_handle.await.expect("Failed to join").is_err()); } Ok(()) } @@ -783,7 +690,9 @@ pub async fn out_of_sequence_write() -> Result<(), Box> { #[nativelink_test] pub async fn chunked_stream_reads_small_set_of_data() -> Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); const VALUE1: &str = "12456789abcdefghijk"; @@ -817,7 +726,9 @@ pub async fn chunked_stream_reads_small_set_of_data() -> Result<(), Box Result<(), Box> { let store_manager = make_store_manager().await?; - let bs_server = make_bytestream_server(store_manager.as_ref())?; + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let store = store_manager.get_store("main_cas").unwrap(); const DATA_SIZE: usize = 10_000_000; @@ -869,8 +780,8 @@ pub async fn read_with_not_found_does_not_deadlock() -> Result<(), Error> { .await .err_tip(|| "Couldn't get store manager")?; let mut read_stream = { - let bs_server = - make_bytestream_server(store_manager.as_ref()).err_tip(|| "Couldn't make store")?; + let bs_server = make_bytestream_server(store_manager.as_ref(), None) + .err_tip(|| "Couldn't make store")?; let read_request = ReadRequest { resource_name: format!( "{}/blobs/{}/{}", @@ -908,17 +819,15 @@ pub async fn read_with_not_found_does_not_deadlock() -> Result<(), Error> { #[nativelink_test] pub async fn test_query_write_status_smoke_test() -> Result<(), Box> { - let store_manager = make_store_manager().await?; - let bs_server = Arc::new(make_bytestream_server(store_manager.as_ref())?); + let store_manager = make_store_manager() + .await + .expect("Failed to make store manager"); + let bs_server = Arc::new( + make_bytestream_server(store_manager.as_ref(), None).expect("Failed to make server"), + ); let raw_data = "12456789abcdefghijk".as_bytes(); - let resource_name = format!( - "{}/uploads/{}/blobs/{}/{}", - INSTANCE_NAME, - "4dcec57e-1389-4ab5-b188-4a59f22ceb4b", // Randomly generated. - HASH1, - raw_data.len() - ); + let resource_name = make_resource_name(raw_data.len()); { let response = bs_server @@ -937,20 +846,8 @@ pub async fn test_query_write_status_smoke_test() -> Result<(), Box::default(); - // Note: This is an undocumented function. - let stream = - Streaming::new_request(codec.decoder(), body, Some(CompressionEncoding::Gzip), None); - - let bs_server_clone = bs_server.clone(); - let join_handle = spawn!("query_write_status_smoke_test_write_stream", async move { - let response_future = bs_server_clone.write(Request::new(stream)); - response_future.await - }); - (tx, join_handle) - }; + let (mut tx, join_handle) = + make_stream_and_writer_spawn(bs_server.clone(), Some(CompressionEncoding::Gzip)); const BYTE_SPLIT_OFFSET: usize = 8; @@ -1005,7 +902,78 @@ pub async fn test_query_write_status_smoke_test() -> Result<(), Box Result<(), Box> { + const MAX_MESSAGE_SIZE: usize = 1024 * 1024; // 1MB. + let store_manager = make_store_manager().await?; + let config = ByteStreamConfig { + cas_stores: hashmap! 
{
+            INSTANCE_NAME.to_string() => "main_cas".to_string(),
+        },
+        max_decoding_message_size: MAX_MESSAGE_SIZE,
+        ..Default::default()
+    };
+    let bs_server = make_bytestream_server(store_manager.as_ref(), Some(config))
+        .expect("Failed to make server");
+    let (server_join_handle, mut bs_client) = server_and_client_stub(bs_server).await;
+
+    // This is the size of the wrapper proto around the data.
+    const WRITE_REQUEST_MSG_WRAPPER_SIZE: usize = 150;
+    {
+        // Ensure that a write of exactly our max message size succeeds.
+        let data = Bytes::from(vec![0u8; MAX_MESSAGE_SIZE - WRITE_REQUEST_MSG_WRAPPER_SIZE]);
+        let write_request = WriteRequest {
+            resource_name: make_resource_name(MAX_MESSAGE_SIZE),
+            write_offset: 0,
+            finish_write: true,
+            data,
+        };
+
+        let (tx, rx) = unbounded_channel();
+        let rx = UnboundedReceiverStream::new(rx);
+
+        tx.send(write_request).expect("Failed to send data");
+
+        let result = bs_client.write(Request::new(rx)).await;
+        assert!(result.is_ok(), "Expected success, got {result:?}");
+    }
+    {
+        // Ensure that a write of our max message size plus one fails.
+        let data = Bytes::from(vec![
+            0u8;
+            MAX_MESSAGE_SIZE - WRITE_REQUEST_MSG_WRAPPER_SIZE + 1
+        ]);
+        let write_request = WriteRequest {
+            resource_name: make_resource_name(MAX_MESSAGE_SIZE),
+            write_offset: 0,
+            finish_write: true,
+            data,
+        };
+
+        let (tx, rx) = unbounded_channel();
+        let rx = UnboundedReceiverStream::new(rx);
+
+        tx.send(write_request).expect("Failed to send data");
+
+        let result = bs_client.write(Request::new(rx)).await;
+        assert!(result.is_err(), "Expected error, got {result:?}");
+        assert!(
+            result
+                .unwrap_err()
+                .to_string()
+                .contains("Error, message length too large"),
+            "Expected a 'message length too large' error"
+        );
+    }
+
+    drop(bs_client);
+    // Wait for the server to shut down; this should happen once `bs_client`
+    // is dropped.
+    server_join_handle.await.expect("Failed to join");
+    Ok(())
+}
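
For configuration purposes, the resolution logic in `ByteStreamServer::new` above means a `max_decoding_message_size` of `0` (the serde default) selects the built-in 4 MiB limit (`DEFAULT_MAX_DECODING_MESSAGE_SIZE`), while any other value is handed to tonic's `max_decoding_message_size` in `into_service`. A minimal sketch of opting into a larger limit, assuming the `servers[].services.bytestream` layout used by the examples in `nativelink-config/examples` (the instance and store names are illustrative, mirroring the test fixtures):

    "services": {
      "bytestream": {
        "cas_stores": {
          "foo_instance_name": "main_cas"
        },
        "max_bytes_per_stream": 65536,
        "max_decoding_message_size": 16777216
      }
    }

Since the field deserializes through `convert_data_size_with_shellexpand`, a human-readable size string such as `"16mb"` should also be accepted; omitting the field entirely falls back to the 4 MiB default.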