From 7ac4ee53b31209a98262f4285a149ee3c75f3386 Mon Sep 17 00:00:00 2001 From: Kate Goldenring Date: Tue, 24 Aug 2021 13:30:01 -0400 Subject: [PATCH] Update Akri Kubernetes and Runtime Dependencies (#361) --- .../actions/build-component-per-arch/main.js | 2 +- .github/workflows/build-rust-code.yml | 2 +- .github/workflows/check-rust.yml | 2 +- .github/workflows/run-tarpaulin.yml | 2 +- Cargo.lock | 1166 ++++++++--------- Cargo.toml | 2 +- README.md | 2 +- agent/Cargo.toml | 34 +- agent/src/main.rs | 1 - agent/src/util/config_action.rs | 120 +- agent/src/util/crictl_containers.rs | 2 +- agent/src/util/device_plugin_builder.rs | 102 +- agent/src/util/device_plugin_service.rs | 65 +- agent/src/util/discovery_operator.rs | 165 ++- agent/src/util/embedded_discovery_handlers.rs | 6 +- agent/src/util/registration.rs | 29 +- agent/src/util/slot_reconciliation.rs | 24 +- agent/src/util/v1beta1.rs | 300 +++-- build/setup.sh | 4 +- controller/Cargo.toml | 13 +- controller/src/util/instance_action.rs | 141 +- controller/src/util/node_watcher.rs | 115 +- controller/src/util/pod_watcher.rs | 179 +-- controller/src/util/shared_test_utils.rs | 76 +- deployment/helm/Chart.yaml | 4 +- .../debug-echo-discovery-handler/Cargo.toml | 6 +- .../onvif-discovery-handler/Cargo.toml | 6 +- .../opcua-discovery-handler/Cargo.toml | 6 +- .../udev-discovery-handler/Cargo.toml | 6 +- discovery-handlers/debug-echo/Cargo.toml | 9 +- .../debug-echo/src/discovery_handler.rs | 17 +- discovery-handlers/onvif/Cargo.toml | 19 +- .../onvif/src/discovery_handler.rs | 18 +- .../onvif/src/discovery_impl.rs | 9 +- .../onvif/src/discovery_utils.rs | 12 +- discovery-handlers/opcua/Cargo.toml | 13 +- .../opcua/src/discovery_handler.rs | 16 +- .../opcua/src/discovery_impl.rs | 2 +- discovery-handlers/udev/Cargo.toml | 13 +- .../udev/src/discovery_handler.rs | 14 +- discovery-utils/Cargo.toml | 14 +- discovery-utils/src/discovery/mod.rs | 23 +- discovery-utils/src/discovery/v0.rs | 240 ++-- 
discovery-utils/src/registration_client.rs | 2 +- samples/brokers/udev-video-broker/Cargo.toml | 13 +- samples/brokers/udev-video-broker/src/main.rs | 20 +- .../udev-video-broker/src/util/camera.rs | 111 +- .../src/util/camera_capturer.rs | 4 +- .../src/util/camera_service.rs | 2 +- shared/Cargo.toml | 24 +- shared/src/akri/configuration.rs | 134 +- shared/src/akri/instance.rs | 237 ++-- shared/src/akri/mod.rs | 5 +- shared/src/k8s/mod.rs | 305 ++--- shared/src/k8s/node.rs | 22 +- shared/src/k8s/pod.rs | 155 +-- shared/src/k8s/service.rs | 287 ++-- shared/src/lib.rs | 1 - shared/src/os/mod.rs | 3 +- shared/src/os/signal.rs | 54 - shared/src/uds/unix_stream.rs | 32 +- test/json/local-instance.json | 2 + test/json/shared-instance-list-slots.json | 2 + test/json/shared-instance-list.json | 2 + test/json/shared-instance-update.json | 2 + test/json/shared-instance.json | 2 + version.txt | 2 +- webhooks/validating/configuration/Cargo.toml | 11 +- webhooks/validating/configuration/src/main.rs | 40 +- 69 files changed, 2155 insertions(+), 2320 deletions(-) delete mode 100644 shared/src/os/signal.rs diff --git a/.github/actions/build-component-per-arch/main.js b/.github/actions/build-component-per-arch/main.js index d3e4a7e5f..352debc22 100644 --- a/.github/actions/build-component-per-arch/main.js +++ b/.github/actions/build-component-per-arch/main.js @@ -67,7 +67,7 @@ async function shell_cmd(cmd) { if (core.getInput('build_rust') == '1') { console.log(`Install Rust`) - child_process.execSync(`curl https://sh.rustup.rs | sh -s -- -y --default-toolchain=1.51.0`); + child_process.execSync(`curl https://sh.rustup.rs | sh -s -- -y --default-toolchain=1.54.0`); const bindir = `${process.env.HOME}/.cargo/bin`; process.env.PATH = `${process.env.PATH}:${bindir}`; diff --git a/.github/workflows/build-rust-code.yml b/.github/workflows/build-rust-code.yml index 5fd394948..1437a36ee 100644 --- a/.github/workflows/build-rust-code.yml +++ b/.github/workflows/build-rust-code.yml @@ -74,7 
+74,7 @@ jobs: - name: Rust install uses: actions-rs/toolchain@v1 with: - toolchain: 1.51.0 + toolchain: 1.54.0 override: true components: clippy, rustfmt diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index 8c06b7b26..a096bd290 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -33,7 +33,7 @@ jobs: - name: Rust install uses: actions-rs/toolchain@v1 with: - toolchain: 1.51.0 + toolchain: 1.54.0 override: true components: clippy, rustfmt - name: Install Linux requirements diff --git a/.github/workflows/run-tarpaulin.yml b/.github/workflows/run-tarpaulin.yml index 1a5e4bdb8..29a7a80ae 100644 --- a/.github/workflows/run-tarpaulin.yml +++ b/.github/workflows/run-tarpaulin.yml @@ -36,7 +36,7 @@ jobs: persist-credentials: false - name: Create tarpaulin instance - run: docker create --network host --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin:0.16.0 bash -c "echo 'sleep 600m; echo bye' > /tmp/keep_alive.sh; chmod 777 /tmp/keep_alive.sh; /tmp/keep_alive.sh" > container_id.txt + run: docker create --network host --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin:0.18.0 bash -c "echo 'sleep 600m; echo bye' > /tmp/keep_alive.sh; chmod 777 /tmp/keep_alive.sh; /tmp/keep_alive.sh" > container_id.txt - name: Start tarpaulin instance run: docker start $(cat container_id.txt) - name: Install linux requirement in tarpaulin instance diff --git a/Cargo.lock b/Cargo.lock index 9a7900c1f..e6eb52940 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "actix" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "543c47e7827f8fcc9d1445bd98ba402137bfce80ee2187429de49c52b5131bd3" +checksum = "3720d0064a0ce5c0de7bd93bdb0a6caebab2a9b5668746145d7b3b0c5da02914" dependencies = [ "actix-rt 2.2.0", "actix_derive", @@ -55,7 +55,7 @@ dependencies = [ "derive_more", "either", 
"futures-util", - "http 0.2.4", + "http", "log", "openssl", "tokio-openssl", @@ -65,9 +65,9 @@ dependencies = [ [[package]] name = "actix-http" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452299e87817ae5673910e53c243484ca38be3828db819b6011736fc6982e874" +checksum = "5cb8958da437716f3f31b0e76f8daf36554128517d7df37ceba7df00f09622ee" dependencies = [ "actix-codec", "actix-connect", @@ -90,8 +90,8 @@ dependencies = [ "futures-core", "futures-util", "fxhash", - "h2 0.2.6", - "http 0.2.4", + "h2 0.2.7", + "http", "httparse", "indexmap", "itoa", @@ -105,7 +105,7 @@ dependencies = [ "regex", "serde", "serde_json", - "serde_urlencoded 0.7.0", + "serde_urlencoded", "sha-1 0.9.7", "slab", "time 0.2.27", @@ -138,7 +138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ad299af73649e1fc893e333ccf86f377751eb95ff875d095131574c6f43452c" dependencies = [ "bytestring", - "http 0.2.4", + "http", "log", "regex", "serde", @@ -296,7 +296,7 @@ dependencies = [ "regex", "serde", "serde_json", - "serde_urlencoded 0.7.0", + "serde_urlencoded", "socket2 0.3.19", "time 0.2.27", "tinyvec", @@ -333,7 +333,7 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "agent" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-debug-echo", "akri-discovery-utils", @@ -351,10 +351,11 @@ dependencies = [ "futures 0.3.16", "futures-core", "futures-util", - "h2 0.2.6", - "hyper 0.13.10", + "h2 0.3.3", + "hyper", "k8s-openapi", "kube", + "kube-runtime", "lazy_static", "log", "mock_instant", @@ -362,14 +363,15 @@ dependencies = [ "mockall_double", "prometheus 0.12.0", "prost", + "prost-types", "rand 0.8.4", "serde", "serde_derive", "serde_json", "serde_yaml", "tempfile", - "tokio 0.2.25", - "tokio-core", + "tokio 1.9.0", + "tokio-stream", "tonic", "tonic-build", "tower", @@ -388,7 +390,7 @@ dependencies = [ [[package]] name = "akri-debug-echo" 
-version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", "akri-shared", @@ -401,16 +403,18 @@ dependencies = [ "serde_derive", "serde_json", "serde_yaml", - "tokio 0.2.25", + "tokio 1.9.0", + "tokio-stream", "tonic", ] [[package]] name = "akri-discovery-utils" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-shared", "anyhow", + "async-stream", "async-trait", "futures 0.3.16", "log", @@ -419,7 +423,8 @@ dependencies = [ "serde_derive", "serde_yaml", "tempfile", - "tokio 0.2.25", + "tokio 1.9.0", + "tokio-stream", "tonic", "tonic-build", "tower", @@ -427,16 +432,16 @@ dependencies = [ [[package]] name = "akri-onvif" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", "akri-shared", "anyhow", "async-trait", - "bytes 0.5.6", + "bytes 1.0.1", "env_logger", "futures-util", - "hyper 0.13.10", + "hyper", "log", "mockall", "serde", @@ -445,7 +450,8 @@ dependencies = [ "serde_yaml", "sxd-document", "sxd-xpath", - "tokio 0.2.25", + "tokio 1.9.0", + "tokio-stream", "tonic", "uuid", "xml-rs", @@ -455,7 +461,7 @@ dependencies = [ [[package]] name = "akri-opcua" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", "akri-shared", @@ -471,35 +477,36 @@ dependencies = [ "serde_derive", "serde_json", "serde_yaml", - "tokio 0.2.25", + "tokio 1.9.0", + "tokio-stream", "tonic", "url 2.2.2", ] [[package]] name = "akri-shared" -version = "0.6.12" +version = "0.6.13" dependencies = [ "anyhow", "async-trait", "either", "env_logger", - "futures 0.1.31", "futures 0.3.16", "futures-util", + "hyper", "k8s-openapi", "kube", + "kube-runtime", "log", "mockall", "prometheus 0.11.0", "rand 0.8.4", + "schemars", "serde", "serde_derive", "serde_json", "serde_yaml", - "tokio 0.2.25", - "tokio-core", - "tokio-signal", + "tokio 1.9.0", "tonic", "tower", "warp", @@ -507,7 +514,7 @@ dependencies = [ [[package]] name = "akri-udev" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", 
"anyhow", @@ -524,11 +531,21 @@ dependencies = [ "serde_derive", "serde_json", "serde_yaml", - "tokio 0.2.25", + "tokio 1.9.0", + "tokio-stream", "tonic", "udev", ] +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "anyhow" version = "1.0.42" @@ -546,19 +563,6 @@ dependencies = [ "futures-core", ] -[[package]] -name = "async-compression" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" -dependencies = [ - "bytes 0.5.6", - "flate2", - "futures-core", - "memchr", - "pin-project-lite 0.2.7", -] - [[package]] name = "async-executor" version = "1.4.1" @@ -655,9 +659,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.2.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22068c0c19514942eefcfd4daf8976ef1aad84e61539f95cd200c35202f80af5" +checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" dependencies = [ "async-stream-impl", "futures-core", @@ -665,9 +669,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.2.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" +checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" dependencies = [ "proc-macro2", "quote", @@ -736,7 +740,7 @@ dependencies = [ "rand 0.7.3", "serde", "serde_json", - "serde_urlencoded 0.7.0", + "serde_urlencoded", ] [[package]] @@ -745,21 +749,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" -[[package]] -name = "base64" -version 
= "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.12.3" @@ -952,36 +941,19 @@ dependencies = [ [[package]] name = "clap" -version = "3.0.0-beta.2" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd1061998a501ee7d4b6d449020df3266ca3124b941ec56cf2005c3779ca142" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ + "ansi_term", "atty", "bitflags", - "clap_derive", - "indexmap", - "lazy_static", - "os_str_bytes", - "strsim", - "termcolor", + "strsim 0.8.0", "textwrap", "unicode-width", "vec_map", ] -[[package]] -name = "clap_derive" -version = "3.0.0-beta.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370f715b81112975b1b69db93e0b56ea4cd4e5002ac43b2da8474106a54096a1" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "cloudabi" version = "0.0.3" @@ -1008,7 +980,7 @@ checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" [[package]] name = "controller" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-shared", "anyhow", @@ -1019,6 +991,7 @@ dependencies = [ "futures 0.3.16", "k8s-openapi", "kube", + "kube-runtime", "lazy_static", "log", "mockall", @@ -1027,7 +1000,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_yaml", - "tokio 0.2.25", + "tokio 1.9.0", ] [[package]] @@ -1175,15 +1148,71 @@ dependencies = [ "syn", ] +[[package]] +name = "darling" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5f2c43f534ea4b0b049015d00269734195e6d3f0f6635cb692251aca6f9f8b3c" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e91455b86830a1c21799d94524df0845183fa55bafd9aa137b01c7d1065fa36" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29b5acf0dea37a7f66f7b25d2c5e93fd46f8f6968b1a5d7a3e02e97768afc95a" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "dashmap" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" +dependencies = [ + "cfg-if 1.0.0", + "num_cpus", +] + [[package]] name = "debug-echo-discovery-handler" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-debug-echo", "akri-discovery-utils", "env_logger", "log", - "tokio 0.2.25", + "tokio 1.9.0", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1224,20 +1253,20 @@ dependencies = [ ] [[package]] -name = "dirs" -version = "2.0.2" +name = "dirs-next" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 0.1.10", - "dirs-sys", + "cfg-if 1.0.0", + "dirs-sys-next", ] [[package]] -name = "dirs-sys" -version = "0.3.6" +name = "dirs-sys-next" +version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", "redox_users", @@ -1250,6 +1279,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "downcast" version = "0.10.0" @@ -1262,6 +1297,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" +[[package]] +name = "dyn-clone" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" + [[package]] name = "either" version = "1.6.1" @@ -1291,9 +1332,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.8.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" dependencies = [ "atty", "humantime", @@ -1497,12 +1538,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" -[[package]] -name = "futures-timer" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" - [[package]] name = "futures-util" version = "0.3.16" @@ -1599,34 +1634,35 @@ dependencies = [ [[package]] name = "h2" 
-version = "0.2.6" -source = "git+https://github.com/kate-goldenring/h2?branch=master#7c7ef6a579c9ce2392787c5728f805ce10f74ddf" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ "bytes 0.5.6", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.4", + "http", "indexmap", "slab", "tokio 0.2.25", "tokio-util 0.3.1", "tracing", + "tracing-futures", ] [[package]] name = "h2" version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" +source = "git+https://github.com/kate-goldenring/h2?branch=v0.3.3-patch#353943d123e9cb7b29e376ee8927be5deffe5f57" dependencies = [ "bytes 1.0.1", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.4", + "http", "indexmap", "slab", "tokio 1.9.0", @@ -1650,7 +1686,7 @@ dependencies = [ "bitflags", "bytes 1.0.1", "headers-core", - "http 0.2.4", + "http", "mime", "sha-1 0.9.7", "time 0.1.44", @@ -1662,7 +1698,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.4", + "http", ] [[package]] @@ -1700,17 +1736,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "http" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -dependencies = [ - "bytes 0.4.12", - "fnv", - "itoa", -] - [[package]] name = "http" version = "0.2.4" @@ -1722,16 +1747,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes 0.5.6", - "http 0.2.4", -] - 
[[package]] name = "http-body" version = "0.4.2" @@ -1739,7 +1754,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ "bytes 1.0.1", - "http 0.2.4", + "http", "pin-project-lite 0.2.7", ] @@ -1749,12 +1764,6 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" -[[package]] -name = "httpdate" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" - [[package]] name = "httpdate" version = "1.0.1" @@ -1767,30 +1776,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "hyper" -version = "0.13.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" -dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.6", - "http 0.2.4", - "http-body 0.3.1", - "httparse", - "httpdate 0.3.2", - "itoa", - "pin-project 1.0.8", - "socket2 0.3.19", - "tokio 0.2.25", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "0.14.11" @@ -1802,10 +1787,10 @@ dependencies = [ "futures-core", "futures-util", "h2 0.3.3", - "http 0.2.4", - "http-body 0.4.2", + "http", + "http-body", "httparse", - "httpdate 1.0.1", + "httpdate", "itoa", "pin-project-lite 0.2.7", "socket2 0.4.1", @@ -1815,19 +1800,37 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite 
0.2.7", + "tokio 1.9.0", + "tokio-io-timeout", +] + [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.10", + "bytes 1.0.1", + "hyper", "native-tls", - "tokio 0.2.25", - "tokio-tls", + "tokio 1.9.0", + "tokio-native-tls", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.1.5" @@ -1862,11 +1865,11 @@ dependencies = [ [[package]] name = "input_buffer" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" +checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", ] [[package]] @@ -1907,9 +1910,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.8.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" dependencies = [ "either", ] @@ -1922,28 +1925,48 @@ checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.51" +version = "0.3.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +checksum = "ce791b7ca6638aae45be056e068fc756d871eb3b3b10b8efa62d1c9cec616752" dependencies = [ 
"wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f995a3c8f2bc3dd52a18a583e90f9ec109c047fa1603a853e46bcda14d2e279d" +dependencies = [ + "serde", + "serde_json", + "treediff", +] + +[[package]] +name = "jsonpath_lib" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaa63191d68230cccb81c5aa23abd53ed64d83337cacbb25a7b8c7979523774f" +dependencies = [ + "log", + "serde", + "serde_json", +] + [[package]] name = "k8s-openapi" -version = "0.6.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eb97e4ea14cef484aa56f44e93653cb6faa351b5f130d38584b3184b6ef5d1" +checksum = "748acc444200aa3528dc131a8048e131a9e75a611a52d152e276e99199313d1a" dependencies = [ - "base64 0.10.1", - "bytes 0.4.12", + "base64 0.13.0", + "bytes 1.0.1", "chrono", - "http 0.1.21", - "percent-encoding 2.1.0", + "schemars", "serde", "serde-value", "serde_json", - "url 2.2.2", ] [[package]] @@ -1958,48 +1981,109 @@ dependencies = [ [[package]] name = "kube" -version = "0.23.0" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a8a53ca3e8cb9f6ebdf09cdd8943d08926da7ca8d5c1d89dee1ed0e8df0a2a" +checksum = "f2bfa22c305a6d817b57a7afcd2e6ee23c80c6c93933edb02f210fdf73f837cc" dependencies = [ - "base64 0.11.0", + "base64 0.13.0", + "bytes 1.0.1", "chrono", - "dirs", + "dirs-next", "either", "futures 0.3.16", - "futures-timer", - "http 0.2.4", + "http", + "http-body", + "hyper", + "hyper-timeout", + "hyper-tls", + "jsonpath_lib", "k8s-openapi", - "log", + "kube-core", + "kube-derive", "openssl", - "reqwest 0.10.10", + "pem", + "pin-project 1.0.8", "serde", - "serde_derive", "serde_json", "serde_yaml", "thiserror", - "time 0.1.44", - "url 2.2.2", + "tokio 1.9.0", + "tokio-native-tls", + "tokio-util 0.6.7", + "tower", + "tower-http", + "tracing", ] [[package]] -name = 
"kv-log-macro" -version = "1.0.7" +name = "kube-core" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +checksum = "9c33d2272d8e530938bafc6cf4ac76f2a6f6c9ca684defcfab6c357913a43bcc" dependencies = [ - "log", + "form_urlencoded", + "http", + "json-patch", + "k8s-openapi", + "once_cell", + "serde", + "serde_json", + "thiserror", ] [[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" - -[[package]] -name = "lazy_static" -version = "1.4.0" +name = "kube-derive" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53dc9fa719dd21d1a4c155cf8936f618a687f3590885e09d9261727cd5dc56a5" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "serde_json", + "syn", +] + +[[package]] +name = "kube-runtime" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb40d5730a3ac47b7153c7ad0494a66881bbdf0c17ead478b714dd82c9dba259" +dependencies = [ + "dashmap", + "derivative", + "futures 0.3.16", + "json-patch", + "k8s-openapi", + "kube", + "pin-project 1.0.8", + "serde", + "serde_json", + "smallvec 1.6.1", + "snafu", + "tokio 1.9.0", + "tokio-util 0.6.7", + "tracing", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "language-tags" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" + +[[package]] +name = "lazy_static" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" @@ -2159,18 +2243,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "mio-named-pipes" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" -dependencies = [ - "log", - "mio 0.6.23", - "miow 0.3.7", - "winapi 0.3.9", -] - [[package]] name = "mio-uds" version = "0.6.8" @@ -2214,9 +2286,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.9.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d614ad23f9bb59119b8b5670a85c7ba92c5e9adf4385c81ea00c51c8be33d5" +checksum = "6ab571328afa78ae322493cacca3efac6a0f2e0a67305b4df31fd439ef129ac0" dependencies = [ "cfg-if 1.0.0", "downcast", @@ -2229,9 +2301,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.9.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd4234635bca06fc96c7368d038061e0aae1b00a764dc817e900dc974e3deea" +checksum = "e7e25b214433f669161f414959594216d8e6ba83b6679d3db96899c0b4639033" dependencies = [ "cfg-if 1.0.0", "proc-macro2", @@ -2356,13 +2428,13 @@ checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "onvif-discovery-handler" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", "akri-onvif", "env_logger", "log", - "tokio 0.2.25", + "tokio 1.9.0", ] [[package]] @@ -2440,13 +2512,13 @@ dependencies = [ [[package]] name = "opcua-discovery-handler" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", "akri-opcua", "env_logger", "log", - "tokio 0.2.25", + "tokio 1.9.0", ] [[package]] @@ -2472,7 +2544,7 @@ name = "openapi" version = "1.1.0" source = "git+https://github.com/DazWilkin/openapi-admission-v1?tag=v1.1.0#60a9ba6bd64efda65cb136a21e0d6a53e962c415" dependencies = [ - "reqwest 0.11.4", + 
"reqwest", "serde", "serde_derive", "serde_json", @@ -2514,19 +2586,13 @@ dependencies = [ [[package]] name = "ordered-float" -version = "1.1.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +checksum = "039f02eb0f69271f26abe3202189275d7aa2258b903cb0281b5de710a2570ff3" dependencies = [ "num-traits", ] -[[package]] -name = "os_str_bytes" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb2e1c3ee07430c2cf76151675e583e0f19985fa6efae47d6848a3e2c824f85" - [[package]] name = "parking" version = "2.0.0" @@ -2584,6 +2650,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "pem" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +dependencies = [ + "base64 0.13.0", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "1.0.1" @@ -2767,30 +2844,6 @@ dependencies = [ "treeline", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2862,21 +2915,21 @@ dependencies = [ [[package]] name = "prost" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" 
+checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "prost-derive", ] [[package]] name = "prost-build" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "heck", "itertools", "log", @@ -2890,9 +2943,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", "itertools", @@ -2903,11 +2956,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "prost", ] @@ -2943,7 +2996,6 @@ dependencies = [ "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc 0.2.0", - "rand_pcg", ] [[package]] @@ -3014,15 +3066,6 @@ dependencies = [ "rand_core 0.6.3", ] -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - [[package]] name = "redox_syscall" version = "0.1.57" @@ -3074,43 +3117,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "reqwest" -version = "0.10.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" -dependencies = [ - "async-compression", - "base64 0.13.0", - "bytes 0.5.6", - "encoding_rs", - "futures-core", - "futures-util", - "http 0.2.4", - "http-body 0.3.1", - "hyper 0.13.10", - "hyper-tls", - "ipnet", - "js-sys", - "lazy_static", - "log", - "mime", - "mime_guess", - "native-tls", - "percent-encoding 2.1.0", - "pin-project-lite 0.2.7", - "serde", - "serde_json", - "serde_urlencoded 0.7.0", - "tokio 0.2.25", - "tokio-tls", - "url 2.2.2", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg 0.7.0", -] - [[package]] name = "reqwest" version = "0.11.4" @@ -3122,9 +3128,9 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "http 0.2.4", - "http-body 0.4.2", - "hyper 0.14.11", + "http", + "http-body", + "hyper", "ipnet", "js-sys", "lazy_static", @@ -3135,7 +3141,7 @@ dependencies = [ "pin-project-lite 0.2.7", "serde", "serde_json", - "serde_urlencoded 0.7.0", + "serde_urlencoded", "tokio 1.9.0", "url 2.2.2", "wasm-bindgen", @@ -3196,19 +3202,6 @@ dependencies = [ "semver 0.11.0", ] -[[package]] -name = "rustls" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -dependencies = [ - "base64 0.10.1", - "log", - "ring", - "sct", - "webpki", -] - [[package]] name = "rustls" version = "0.19.1" @@ -3245,10 +3238,28 @@ dependencies = [ ] [[package]] -name = "scoped-tls" -version = "0.1.2" +name = "schemars" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" +checksum = "bc6ab463ae35acccb5cba66c0084c985257b797d288b6050cc2f6ac1b266cb78" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "902fdfbcf871ae8f653bddf4b2c05905ddaabc08f69d32a915787e3be0d31356" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn", +] [[package]] name = "scoped-tls" @@ -3339,9 +3350,9 @@ dependencies = [ [[package]] name = "serde-value" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a65a7291a8a568adcae4c10a677ebcedbc6c9cec91c054dee2ce40b0e3290eb" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" dependencies = [ "ordered-float", "serde", @@ -3359,26 +3370,26 @@ dependencies = [ ] [[package]] -name = "serde_json" -version = "1.0.66" +name = "serde_derive_internals" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" +checksum = "1dbab34ca63057a1f15280bdf3c39f2b1eb1b54c17e98360e511637aef7418c6" dependencies = [ - "itoa", - "ryu", - "serde", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "serde_urlencoded" -version = "0.6.1" +name = "serde_json" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" +checksum = "336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" dependencies = [ - "dtoa", + "indexmap", "itoa", + "ryu", "serde", - "url 2.2.2", ] [[package]] @@ -3466,6 +3477,29 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +[[package]] +name = "snafu" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" +dependencies = [ + "doc-comment", + "futures-core", + "pin-project 0.4.28", + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.6.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "socket2" version = "0.3.19" @@ -3551,6 +3585,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + [[package]] name = "strsim" version = "0.10.0" @@ -3620,9 +3660,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.12.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "203008d98caf094106cfaba70acfed15e18ed3ddb7d94e49baec153a2b462789" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ "unicode-width", ] @@ -3751,20 +3791,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ "bytes 0.5.6", - "fnv", "futures-core", "iovec", "lazy_static", "libc", "memchr", "mio 0.6.23", - "mio-named-pipes", "mio-uds", - "num_cpus", "pin-project-lite 0.1.12", "signal-hook-registry", "slab", - "tokio-macros", "winapi 0.3.9", ] @@ -3779,10 +3815,12 @@ dependencies = [ "libc", "memchr", "mio 0.7.13", + "num_cpus", "once_cell", "parking_lot 0.11.1", "pin-project-lite 0.2.7", "signal-hook-registry", + "tokio-macros", "winapi 0.3.9", ] @@ -3797,25 +3835,6 @@ dependencies = [ "tokio-io", ] -[[package]] -name = "tokio-core" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87b1395334443abca552f63d4f61d0486f12377c2ba8b368e523f89e828cffd4" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "log", - 
"mio 0.6.23", - "scoped-tls 0.1.2", - "tokio 0.1.22", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-timer", -] - [[package]] name = "tokio-current-thread" version = "0.1.7" @@ -3858,17 +3877,37 @@ dependencies = [ "log", ] +[[package]] +name = "tokio-io-timeout" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +dependencies = [ + "pin-project-lite 0.2.7", + "tokio 1.9.0", +] + [[package]] name = "tokio-macros" -version = "0.2.6" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio 1.9.0", +] + [[package]] name = "tokio-openssl" version = "0.4.0" @@ -3900,31 +3939,24 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.12.3" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", - "rustls 0.16.0", - "tokio 0.2.25", + "rustls", + "tokio 1.9.0", "webpki", ] [[package]] -name = "tokio-signal" -version = "0.2.9" +name = "tokio-stream" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c34c6e548f101053321cba3da7cbb87a610b85555884c41b07da2eb91aff12" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ - "futures 0.1.31", - "libc", - "mio 
0.6.23", - "mio-uds", - "signal-hook-registry", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "winapi 0.3.9", + "futures-core", + "pin-project-lite 0.2.7", + "tokio 1.9.0", ] [[package]] @@ -3980,26 +4012,16 @@ dependencies = [ "tokio-executor", ] -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.25", -] - [[package]] name = "tokio-tungstenite" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c" +checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" dependencies = [ "futures-util", "log", - "pin-project 0.4.28", - "tokio 0.2.25", + "pin-project 1.0.8", + "tokio 1.9.0", "tungstenite", ] @@ -4036,20 +4058,6 @@ dependencies = [ "tokio-reactor", ] -[[package]] -name = "tokio-util" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" -dependencies = [ - "bytes 0.5.6", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.1.12", - "tokio 0.2.25", -] - [[package]] name = "tokio-util" version = "0.3.1" @@ -4075,35 +4083,37 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.2.7", + "slab", "tokio 1.9.0", ] [[package]] name = "tonic" -version = "0.1.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08283643b1d483eb7f3fc77069e63b5cba3e4db93514b3d45470e67f123e4e48" +checksum = "796c5e1cd49905e65dd8e700d4cb1dffcbfdb4fc9d017de08c1a537afd83627c" dependencies = [ "async-stream", "async-trait", - "base64 0.10.1", - "bytes 0.5.6", + "base64 0.13.0", + "bytes 1.0.1", "futures-core", "futures-util", - "http 0.2.4", - "http-body 0.3.1", - "hyper 
0.13.10", - "percent-encoding 1.0.1", - "pin-project 0.4.28", + "h2 0.3.3", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding 2.1.0", + "pin-project 1.0.8", "prost", "prost-derive", - "tokio 0.2.25", + "tokio 1.9.0", "tokio-rustls", - "tokio-util 0.2.0", + "tokio-stream", + "tokio-util 0.6.7", "tower", - "tower-balance", - "tower-load", - "tower-make", + "tower-layer", "tower-service", "tracing", "tracing-futures", @@ -4111,9 +4121,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.1.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0436413ba71545bcc6c2b9a0f9d78d72deb0123c6a75ccdfe7c056f9930f5e52" +checksum = "12b52d07035516c2b74337d2ac7746075e7dcae7643816c1b12c5ff8a7484c08" dependencies = [ "proc-macro2", "prost-build", @@ -4123,182 +4133,54 @@ dependencies = [ [[package]] name = "tower" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3169017c090b7a28fce80abaad0ab4f5566423677c9331bb320af7e49cfe62" -dependencies = [ - "futures-core", - "tower-buffer", - "tower-discover", - "tower-layer", - "tower-limit", - "tower-load-shed", - "tower-retry", - "tower-service", - "tower-timeout", - "tower-util", -] - -[[package]] -name = "tower-balance" -version = "0.3.0" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a792277613b7052448851efcf98a2c433e6f1d01460832dc60bef676bc275d4c" +checksum = "f60422bc7fefa2f3ec70359b8ff1caff59d785877eb70595904605bcc412470f" dependencies = [ "futures-core", "futures-util", "indexmap", - "pin-project 0.4.28", - "rand 0.7.3", + "pin-project 1.0.8", + "rand 0.8.4", "slab", - "tokio 0.2.25", - "tower-discover", + "tokio 1.9.0", + "tokio-stream", + "tokio-util 0.6.7", "tower-layer", - "tower-load", - "tower-make", - "tower-ready-cache", "tower-service", "tracing", ] [[package]] -name = "tower-buffer" -version = "0.3.0" +name = "tower-http" +version = "0.1.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4887dc2a65d464c8b9b66e0e4d51c2fd6cf5b3373afc72805b0a60bce00446a" +checksum = "0b7b56efe69aa0ad2b5da6b942e57ea9f6fe683b7a314d4ff48662e2c8838de1" dependencies = [ + "base64 0.13.0", + "bytes 1.0.1", "futures-core", - "pin-project 0.4.28", - "tokio 0.2.25", + "futures-util", + "http", + "http-body", + "pin-project 1.0.8", "tower-layer", "tower-service", "tracing", ] -[[package]] -name = "tower-discover" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6b5000c3c54d269cc695dff28136bb33d08cbf1df2c48129e143ab65bf3c2a" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" -[[package]] -name = "tower-limit" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c3040c5dbed68abffaa0d4517ac1a454cd741044f33ab0eefab6b8d1361404" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-load", - "tower-service", -] - -[[package]] -name = "tower-load" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc79fc3afd07492b7966d7efa7c6c50f8ed58d768a6075dd7ae6591c5d2017b" -dependencies = [ - "futures-core", - "log", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-discover", - "tower-service", -] - -[[package]] -name = "tower-load-shed" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f021e23900173dc315feb4b6922510dae3e79c689b74c089112066c11f0ae4e" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-make" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce50370d644a0364bf4877ffd4f76404156a248d104e2cc234cd391ea5cdc965" -dependencies = [ - "tokio 0.2.25", - "tower-service", -] - -[[package]] -name = "tower-ready-cache" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eabb6620e5481267e2ec832c780b31cad0c15dcb14ed825df5076b26b591e1f" -dependencies = [ - "futures-core", - "futures-util", - "indexmap", - "log", - "tokio 0.2.25", - "tower-service", -] - -[[package]] -name = "tower-retry" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6727956aaa2f8957d4d9232b308fe8e4e65d99db30f42b225646e86c9b6a952" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-service" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" -[[package]] -name = "tower-timeout" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "127b8924b357be938823eaaec0608c482d40add25609481027b96198b2e4b31e" -dependencies = [ - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" -dependencies = [ - "futures-core", - "futures-util", - "pin-project 0.4.28", - "tower-service", -] - [[package]] name = "tracing" version = "0.1.26" @@ -4342,6 +4224,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "treediff" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "761e8d5ad7ce14bb82b7e61ccc0ca961005a275a060b9644a2431aa11553c2ff" +dependencies = [ + "serde_json", +] + [[package]] name = 
"treeline" version = "0.1.0" @@ -4395,18 +4286,18 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23" +checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "byteorder", - "bytes 0.5.6", - "http 0.2.4", + "bytes 1.0.1", + "http", "httparse", "input_buffer", "log", - "rand 0.7.3", + "rand 0.8.4", "sha-1 0.9.7", "url 2.2.2", "utf-8", @@ -4451,28 +4342,27 @@ dependencies = [ [[package]] name = "udev-discovery-handler" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-discovery-utils", "akri-udev", "env_logger", "log", - "tokio 0.2.25", + "tokio 1.9.0", ] [[package]] name = "udev-video-broker" -version = "0.6.12" +version = "0.6.13" dependencies = [ "akri-shared", "env_logger", - "futures 0.1.31", "lazy_static", "log", "prometheus 0.12.0", "prost", "rscam", - "tokio 0.2.25", + "tokio 1.9.0", "tonic", "tonic-build", ] @@ -4551,12 +4441,6 @@ dependencies = [ "percent-encoding 2.1.0", ] -[[package]] -name = "urlencoding" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a1f0175e03a0973cf4afd476bef05c26e228520400eb1fd473ad417b1c00ffb" - [[package]] name = "utf-8" version = "0.7.6" @@ -4618,30 +4502,31 @@ dependencies = [ [[package]] name = "warp" -version = "0.2.5" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407" +checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "futures 0.3.16", "headers", - "http 0.2.4", - "hyper 0.13.10", + "http", + "hyper", "log", "mime", "mime_guess", "multipart", - "pin-project 
0.4.28", - "scoped-tls 1.0.0", + "percent-encoding 2.1.0", + "pin-project 1.0.8", + "scoped-tls", "serde", "serde_json", - "serde_urlencoded 0.6.1", - "tokio 0.2.25", + "serde_urlencoded", + "tokio 1.9.0", + "tokio-stream", "tokio-tungstenite", + "tokio-util 0.6.7", "tower-service", "tracing", - "tracing-futures", - "urlencoding", ] [[package]] @@ -4658,9 +4543,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +checksum = "b608ecc8f4198fe8680e2ed18eccab5f0cd4caaf3d83516fa5fb2e927fda2586" dependencies = [ "cfg-if 1.0.0", "serde", @@ -4670,9 +4555,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "580aa3a91a63d23aac5b6b267e2d13cb4f363e31dce6c352fca4752ae12e479f" dependencies = [ "bumpalo", "lazy_static", @@ -4685,9 +4570,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.24" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" +checksum = "16646b21c3add8e13fdb8f20172f8a28c3dbf62f45406bcff0233188226cfe0c" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -4697,9 +4582,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +checksum = "171ebf0ed9e1458810dfcb31f2e766ad6b3a89dbda42d8901f2b268277e5f09c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4707,9 +4592,9 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "6c2657dd393f03aa2a659c25c6ae18a13a4048cebd220e147933ea837efc589f" dependencies = [ "proc-macro2", "quote", @@ -4720,15 +4605,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.74" +version = "0.2.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "2e0c4a743a309662d45f4ede961d7afa4ba4131a59a639f29b0069c3798bbcc2" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +checksum = "01c70a82d842c9979078c772d4a1344685045f1a5628f677c2b2eab4dd7d2696" dependencies = [ "js-sys", "wasm-bindgen", @@ -4736,7 +4621,7 @@ dependencies = [ [[package]] name = "webhook-configuration" -version = "0.6.12" +version = "0.6.13" dependencies = [ "actix", "actix-rt 2.2.0", @@ -4745,9 +4630,10 @@ dependencies = [ "clap", "k8s-openapi", "kube", + "kube-runtime", "openapi", "openssl", - "rustls 0.19.1", + "rustls", "serde", "serde_json", ] @@ -4773,10 +4659,12 @@ dependencies = [ [[package]] name = "which" -version = "3.1.1" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" dependencies = [ + "either", + "lazy_static", "libc", ] @@ -4874,9 +4762,9 @@ dependencies = [ [[package]] name = "yaserde" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc096efbee9ec8fee0600a15bb4fd651ccc14570cb05b6d4dd66b0325e4a0b5e" 
+checksum = "a2776ec5bb20e76d89268e87e1ea66c078b94f55e9771e4d648adda3019f87fc" dependencies = [ "log", "xml-rs", @@ -4884,12 +4772,14 @@ dependencies = [ [[package]] name = "yaserde_derive" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9af81f1d48039716dd825cf4a7d61d39583f8b12705994abb446bae749a977bb" +checksum = "5c0b0a4701f203ebaecce4971a6bb8575aa07b617bdc39ddfc6ffeff3a38530d" dependencies = [ "heck", + "log", "proc-macro2", "quote", "syn", + "xml-rs", ] diff --git a/Cargo.toml b/Cargo.toml index 5fb515883..2cf3bca67 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ # Patch to allow invalid authority headers provided by grpc-go / kubelet # Issue to track: https://github.com/grpc/grpc-go/issues/2628 [patch.crates-io] -h2 = { git = "https://github.com/kate-goldenring/h2", branch = "master" } +h2 = { git = "https://github.com/kate-goldenring/h2", branch = "v0.3.3-patch" } [workspace] members = [ diff --git a/README.md b/README.md index 41d752960..c41f91c63 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

Akri Logo

[![Slack channel #akri](https://img.shields.io/badge/slack-akri-blueviolet.svg?logo=slack)](https://kubernetes.slack.com/messages/akri) -[![Rust Version](https://img.shields.io/badge/rustc-1.51.0-blue.svg)](https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html) +[![Rust Version](https://img.shields.io/badge/rustc-1.54.0-blue.svg)](https://blog.rust-lang.org/2021/07/29/Rust-1.54.0.html) [![Kubernetes Version](https://img.shields.io/badge/kubernetes-≥%201.16-blue.svg)](https://kubernetes.io/) [![codecov](https://codecov.io/gh/deislabs/akri/branch/main/graph/badge.svg?token=V468HO7CDE)](https://codecov.io/gh/deislabs/akri) diff --git a/agent/Cargo.toml b/agent/Cargo.toml index 2416e808d..115cf36f1 100644 --- a/agent/Cargo.toml +++ b/agent/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "agent" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring ", ""] edition = "2018" @@ -14,38 +14,40 @@ akri-opcua = { path = "../discovery-handlers/opcua", optional = true } akri-udev = { path = "../discovery-handlers/udev", optional = true } akri-shared = { path = "../shared" } anyhow = "1.0.38" -async-stream = "0.2" +async-stream = "0.3" async-trait = "0.1.0" blake2 = "0.9.0" chrono = "0.4.10" cfg-if = "1.0.0" -env_logger = "0.8.3" +env_logger = "0.9.0" futures = { version = "0.3.1", package = "futures" } futures-core = "0.3" futures-util = "0.3" -hyper = "0.13.10" -h2 = "=0.2.6" -kube = { version = "0.23.0", features = ["openapi"] } -k8s-openapi = { version = "0.6.0", features = ["v1_16"] } +hyper = "0.14.2" +h2 = "=0.3.3" +kube = { version = "0.59.0", features = ["derive"] } +kube-runtime = "0.59.0" +k8s-openapi = { version = "0.13.0", default-features = false, features = ["schemars", "v1_16"] } lazy_static = "1.4" log = "0.4" mockall_double = "0.2.0" prometheus = { version = "0.12.0", features = ["process"] } -prost = "0.6" -rand = "0.8.3" +prost = "0.8.0" +prost-types = "0.8.0" +rand = "0.8.2" serde = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" serde_derive 
= "1.0.104" -tokio = { version = "0.2", features = ["full"] } -tokio-core = "0.1" -tonic = "0.1" -tower = "0.3" -url = "2.2.0" +tokio = { version = "1.0", features = ["rt-multi-thread", "time", "fs", "macros", "net"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = "0.5.2" +tower = "0.4.8" +url = "2.1.0" uuid = { version = "0.8.1", features = ["v4"] } [build-dependencies] -tonic-build = "0.1.1" +tonic-build = "0.5.2" [dev-dependencies] # for testing using a simple discovery handler @@ -53,7 +55,7 @@ akri-discovery-utils = { path = "../discovery-utils", features = ["mock-discover akri-onvif = { path = "../discovery-handlers/onvif" } akri-opcua = { path = "../discovery-handlers/opcua" } akri-udev = { path = "../discovery-handlers/udev"} -mockall = "0.9.0" +mockall = "0.10.2" mock_instant = { version = "0.2", features = ["sync"] } tempfile = "3.1.0" diff --git a/agent/src/main.rs b/agent/src/main.rs index 358f38553..ea7568b87 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -5,7 +5,6 @@ extern crate lazy_static; extern crate log; #[macro_use] extern crate serde_derive; -extern crate tokio_core; mod util; use akri_shared::akri::{metrics::run_metrics_server, API_NAMESPACE}; diff --git a/agent/src/util/config_action.rs b/agent/src/util/config_action.rs index 89a8ac798..d125a6674 100644 --- a/agent/src/util/config_action.rs +++ b/agent/src/util/config_action.rs @@ -9,12 +9,12 @@ use super::{ registration::RegisteredDiscoveryHandlerMap, }; use akri_shared::{ - akri::{configuration::KubeAkriConfig, API_CONFIGURATIONS, API_NAMESPACE, API_VERSION}, + akri::configuration::Configuration, k8s, k8s::{try_delete_instance, KubeInterface}, }; -use futures::StreamExt; -use kube::api::{Informer, RawApi, WatchEvent}; +use futures::{StreamExt, TryStreamExt}; +use kube::api::{Api, ListParams, WatchEvent}; use log::{info, trace}; use std::{collections::HashMap, sync::Arc}; use tokio::sync::{broadcast, mpsc, Mutex}; @@ -43,7 +43,7 @@ pub async fn do_config_watch( 
) -> Result<(), Box> { info!("do_config_watch - enter"); let config_map: ConfigMap = Arc::new(Mutex::new(HashMap::new())); - let kube_interface = k8s::create_kube_interface(); + let kube_interface = k8s::KubeImpl::new().await?; let mut tasks = Vec::new(); // Handle pre-existing configs @@ -54,7 +54,7 @@ pub async fn do_config_watch( let new_discovery_handler_sender = new_discovery_handler_sender.clone(); tasks.push(tokio::spawn(async move { handle_config_add( - Arc::new(Box::new(k8s::create_kube_interface())), + Arc::new(k8s::KubeImpl::new().await.unwrap()), &config, config_map, discovery_handler_map, @@ -90,36 +90,32 @@ async fn watch_for_config_changes( new_discovery_handler_sender: broadcast::Sender, ) -> Result<(), Box> { trace!("watch_for_config_changes - start"); - let akri_config_type = RawApi::customResource(API_CONFIGURATIONS) - .group(API_NAMESPACE) - .version(API_VERSION); - let informer = Informer::raw(kube_interface.get_kube_client(), akri_config_type) - .init() - .await?; - loop { - let mut configs = informer.poll().await?.boxed(); - - // Currently, this does not handle None except to break the - // while. - while let Some(event) = configs.next().await { - let new_discovery_handler_sender = new_discovery_handler_sender.clone(); - handle_config( - kube_interface, - event?, - config_map.clone(), - discovery_handler_map.clone(), - new_discovery_handler_sender, - ) - .await? - } + let resource = Api::::all(kube_interface.get_kube_client()); + let mut stream = resource + .watch(&ListParams::default(), akri_shared::akri::WATCH_VERSION) + .await? + .boxed(); + // Currently, this does not handle None except to break the + // while. + while let Some(event) = stream.try_next().await? { + let new_discovery_handler_sender = new_discovery_handler_sender.clone(); + handle_config( + kube_interface, + event, + config_map.clone(), + discovery_handler_map.clone(), + new_discovery_handler_sender, + ) + .await? 
} + Ok(()) } /// This takes an event off the Configuration stream and delegates it to the /// correct function based on the event type. async fn handle_config( kube_interface: &impl KubeInterface, - event: WatchEvent, + event: WatchEvent, config_map: ConfigMap, discovery_handler_map: RegisteredDiscoveryHandlerMap, new_discovery_handler_sender: broadcast::Sender, @@ -128,12 +124,12 @@ async fn handle_config( match event { WatchEvent::Added(config) => { info!( - "handle_config - added Configuration {}", + "handle_config - added Configuration {:?}", config.metadata.name ); tokio::spawn(async move { handle_config_add( - Arc::new(Box::new(k8s::create_kube_interface())), + Arc::new(k8s::KubeImpl::new().await.unwrap()), &config, config_map, discovery_handler_map, @@ -146,7 +142,7 @@ async fn handle_config( } WatchEvent::Deleted(config) => { info!( - "handle_config - deleted Configuration {}", + "handle_config - deleted Configuration {:?}", config.metadata.name, ); handle_config_delete(kube_interface, &config, config_map).await?; @@ -155,13 +151,13 @@ async fn handle_config( // If a config is updated, delete all associated instances and device plugins and then recreate them to reflect updated config WatchEvent::Modified(config) => { info!( - "handle_config - modified Configuration {}", + "handle_config - modified Configuration {:?}", config.metadata.name, ); handle_config_delete(kube_interface, &config, config_map.clone()).await?; tokio::spawn(async move { handle_config_add( - Arc::new(Box::new(k8s::create_kube_interface())), + Arc::new(k8s::KubeImpl::new().await.unwrap()), &config, config_map, discovery_handler_map, @@ -176,14 +172,15 @@ async fn handle_config( error!("handle_config - error for Configuration: {}", e); Ok(()) } + WatchEvent::Bookmark(_) => Ok(()), } } /// This handles added Configuration by creating a new ConfigInfo for it and adding it to the ConfigMap. 
/// Then calls a function to continually observe the availability of instances associated with the Configuration. async fn handle_config_add( - kube_interface: Arc>, - config: &KubeAkriConfig, + kube_interface: Arc, + config: &Configuration, config_map: ConfigMap, discovery_handler_map: RegisteredDiscoveryHandlerMap, new_discovery_handler_sender: broadcast::Sender, @@ -203,7 +200,7 @@ async fn handle_config_add( config_map .lock() .await - .insert(config_name.clone(), config_info); + .insert(config_name.clone().unwrap(), config_info); let config = config.clone(); // Keep discovering instances until the config is deleted, signaled by a message from handle_config_delete @@ -229,18 +226,19 @@ async fn handle_config_add( /// and deletes the Instance CRD. async fn handle_config_delete( kube_interface: &impl KubeInterface, - config: &KubeAkriConfig, + config: &Configuration, config_map: ConfigMap, ) -> Result<(), Box> { + let name = config.metadata.name.clone().unwrap(); trace!( "handle_config_delete - for config {} telling do_periodic_discovery to end", - config.metadata.name + name ); // Send message to stop observing instances' availability and waits until response is received if config_map .lock() .await - .get(&config.metadata.name) + .get(&name) .unwrap() .stop_discovery_sender .clone() @@ -250,7 +248,7 @@ async fn handle_config_delete( config_map .lock() .await - .get_mut(&config.metadata.name) + .get_mut(&name) .unwrap() .finished_discovery_receiver .recv() @@ -258,12 +256,12 @@ async fn handle_config_delete( .unwrap(); trace!( "handle_config_delete - for config {} received message that do_periodic_discovery ended", - config.metadata.name + name ); } else { trace!( "handle_config_delete - for config {} do_periodic_discovery receiver has been dropped", - config.metadata.name + name ); } @@ -271,12 +269,8 @@ async fn handle_config_delete( let instance_map: InstanceMap; { let mut config_map_locked = config_map.lock().await; - instance_map = config_map_locked - 
.get(&config.metadata.name) - .unwrap() - .instance_map - .clone(); - config_map_locked.remove(&config.metadata.name); + instance_map = config_map_locked.get(&name).unwrap().instance_map.clone(); + config_map_locked.remove(&name); } delete_all_instances_in_map(kube_interface, instance_map, config).await?; Ok(()) @@ -286,14 +280,14 @@ async fn handle_config_delete( pub async fn delete_all_instances_in_map( kube_interface: &impl k8s::KubeInterface, instance_map: InstanceMap, - config: &KubeAkriConfig, + config: &Configuration, ) -> Result<(), Box> { let mut instance_map_locked = instance_map.lock().await; let instances_to_delete_map = instance_map_locked.clone(); let namespace = config.metadata.namespace.as_ref().unwrap(); for (instance_name, instance_info) in instances_to_delete_map { trace!( - "handle_config_delete - found Instance {} associated with deleted config {} ... sending message to end list_and_watch", + "handle_config_delete - found Instance {} associated with deleted config {:?} ... 
sending message to end list_and_watch", instance_name, config.metadata.name ); @@ -302,7 +296,7 @@ pub async fn delete_all_instances_in_map( .send(device_plugin_service::ListAndWatchMessageKind::End) .unwrap(); instance_map_locked.remove(&instance_name); - try_delete_instance(kube_interface, &instance_name, &namespace).await?; + try_delete_instance(kube_interface, &instance_name, namespace).await?; } Ok(()) } @@ -317,7 +311,7 @@ mod config_action_tests { }; use super::*; use akri_discovery_utils::discovery::{mock_discovery_handler, v0::Device}; - use akri_shared::{akri::configuration::KubeAkriConfig, k8s::MockKubeInterface}; + use akri_shared::{akri::configuration::Configuration, k8s::MockKubeInterface}; use std::{collections::HashMap, fs, sync::Arc}; use tokio::sync::{broadcast, Mutex}; @@ -326,8 +320,8 @@ mod config_action_tests { let _ = env_logger::builder().is_test(true).try_init(); let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); - let config_name = config.metadata.name.clone(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); + let config_name = config.metadata.name.clone().unwrap(); let mut list_and_watch_message_receivers = Vec::new(); let mut visible_discovery_results = Vec::new(); let mut mock = MockKubeInterface::new(); @@ -339,7 +333,7 @@ mod config_action_tests { ) .await; let (stop_discovery_sender, mut stop_discovery_receiver) = broadcast::channel(2); - let (mut finished_discovery_sender, finished_discovery_receiver) = mpsc::channel(2); + let (finished_discovery_sender, finished_discovery_receiver) = mpsc::channel(2); let mut map: HashMap = HashMap::new(); map.insert( config_name.clone(), @@ -386,7 +380,7 @@ mod config_action_tests { async fn run_and_test_handle_config_add( discovery_handler_map: RegisteredDiscoveryHandlerMap, config_map: ConfigMap, - config: 
KubeAkriConfig, + config: Configuration, dh_endpoint: &DiscoveryHandlerEndpoint, dh_name: &str, ) -> tokio::task::JoinHandle<()> { @@ -396,8 +390,7 @@ mod config_action_tests { .expect_create_instance() .times(1) .returning(move |_, _, _, _, _| Ok(())); - let arc_mock_kube_interface: Arc> = - Arc::new(Box::new(mock_kube_interface)); + let arc_mock_kube_interface: Arc = Arc::new(mock_kube_interface); let config_add_config = config.clone(); let config_add_config_map = config_map.clone(); let config_add_discovery_handler_map = discovery_handler_map.clone(); @@ -414,10 +407,11 @@ mod config_action_tests { }); // Loop until the Configuration and single discovered Instance are added to the ConfigMap + let config_name = config.metadata.name.unwrap(); let mut x: i8 = 0; while x < 5 { - tokio::time::delay_for(std::time::Duration::from_millis(200)).await; - if let Some(config_info) = config_map.lock().await.get(&config.metadata.name) { + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + if let Some(config_info) = config_map.lock().await.get(&config_name) { if config_info.instance_map.lock().await.len() == 1 { break; } @@ -444,7 +438,7 @@ mod config_action_tests { ) { let mut x: i8 = 0; while x < 5 { - tokio::time::delay_for(std::time::Duration::from_millis(200)).await; + tokio::time::sleep(std::time::Duration::from_millis(200)).await; let dh_map = discovery_handler_map.lock().unwrap(); if let Some(dh_details_map) = dh_map.get(dh_name) { if dh_details_map.get(dh_endpoint).unwrap().connectivity_status == dh_status { @@ -497,8 +491,8 @@ mod config_action_tests { // Discovery Handler should create an instance and be marked as Active let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); - let config_name = config.metadata.name.clone(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); 
+ let config_name = config.metadata.name.clone().unwrap(); let config_map: ConfigMap = Arc::new(Mutex::new(HashMap::new())); let first_add_handle = run_and_test_handle_config_add( discovery_handler_map.clone(), diff --git a/agent/src/util/crictl_containers.rs b/agent/src/util/crictl_containers.rs index 8a44179b0..e3dea472a 100644 --- a/agent/src/util/crictl_containers.rs +++ b/agent/src/util/crictl_containers.rs @@ -17,7 +17,7 @@ struct CriCtlContainer { /// This gets the usage slots for an instance by getting the annotations that were stored at id `AKRI_SLOT_ANNOTATION_NAME` during allocate. pub fn get_container_slot_usage(crictl_output: &str) -> HashSet { - match serde_json::from_str::(&crictl_output) { + match serde_json::from_str::(crictl_output) { Ok(crictl_output_parsed) => crictl_output_parsed .containers .iter() diff --git a/agent/src/util/device_plugin_builder.rs b/agent/src/util/device_plugin_builder.rs index ed31124b2..0a208cb9d 100644 --- a/agent/src/util/device_plugin_builder.rs +++ b/agent/src/util/device_plugin_builder.rs @@ -9,11 +9,11 @@ use super::{ }; use akri_discovery_utils::discovery::v0::Device; use akri_shared::{ - akri::{configuration::KubeAkriConfig, AKRI_PREFIX}, + akri::{configuration::Configuration, AKRI_PREFIX}, uds::unix_stream, }; use async_trait::async_trait; -use futures::stream::TryStreamExt; +use futures::TryFutureExt; use log::{info, trace}; #[cfg(test)] use mockall::{automock, predicate::*}; @@ -33,7 +33,7 @@ pub trait DevicePluginBuilderInterface: Send + Sync { async fn build_device_plugin( &self, instance_name: String, - config: &KubeAkriConfig, + config: &Configuration, shared: bool, instance_map: InstanceMap, device: Device, @@ -65,7 +65,7 @@ impl DevicePluginBuilderInterface for DevicePluginBuilder { async fn build_device_plugin( &self, instance_name: String, - config: &KubeAkriConfig, + config: &Configuration, shared: bool, instance_map: InstanceMap, device: Device, @@ -87,7 +87,7 @@ impl DevicePluginBuilderInterface for 
DevicePluginBuilder { instance_name: instance_name.clone(), endpoint: device_endpoint.clone(), config: config.spec.clone(), - config_name: config.metadata.name.clone(), + config_name: config.metadata.name.clone().unwrap(), config_uid: config.metadata.uid.as_ref().unwrap().clone(), config_namespace: config.metadata.namespace.as_ref().unwrap().clone(), shared, @@ -131,25 +131,31 @@ impl DevicePluginBuilderInterface for DevicePluginBuilder { tokio::fs::create_dir_all(Path::new(&socket_path[..]).parent().unwrap()) .await .expect("Failed to create dir at socket path"); - let mut uds = - UnixListener::bind(socket_path.clone()).expect("Failed to bind to socket path"); let service = DevicePluginServer::new(device_plugin_service); - let socket_path_to_delete = socket_path.clone(); + let task_socket_path = socket_path.clone(); task::spawn(async move { + let socket_to_delete = task_socket_path.clone(); + let incoming = { + let uds = + UnixListener::bind(task_socket_path).expect("Failed to bind to socket path"); + + async_stream::stream! { + while let item = uds.accept().map_ok(|(st, _)| unix_stream::UnixStream(st)).await { + yield item; + } + } + }; Server::builder() .add_service(service) - .serve_with_incoming_shutdown( - uds.incoming().map_ok(unix_stream::UnixStream), - shutdown_signal(server_ender_receiver), - ) + .serve_with_incoming_shutdown(incoming, shutdown_signal(server_ender_receiver)) .await .unwrap(); trace!( "serve - gracefully shutdown ... 
deleting socket {}", - socket_path_to_delete + socket_to_delete ); // Socket may already be deleted in the case of the kubelet restart - std::fs::remove_file(socket_path_to_delete).unwrap_or(()); + std::fs::remove_file(socket_to_delete).unwrap_or(()); }); akri_shared::uds::unix_stream::try_connect(&socket_path).await?; @@ -167,7 +173,7 @@ impl DevicePluginBuilderInterface for DevicePluginBuilder { capability_id: &str, socket_name: &str, instance_name: &str, - mut server_ender_sender: mpsc::Sender<()>, + server_ender_sender: mpsc::Sender<()>, kubelet_socket: &str, ) -> Result<(), Box> { info!( @@ -256,34 +262,45 @@ pub mod tests { } } + async fn serve_for_test>( + service: RegistrationServer, + socket: P, + ) { + let incoming = { + let uds = UnixListener::bind(socket).expect("Failed to bind to socket path"); + + async_stream::stream! { + while let item = uds.accept().map_ok(|(st, _)| unix_stream::UnixStream(st)).await { + yield item; + } + } + }; + + Server::builder() + .add_service(service) + .serve_with_incoming(incoming) + .await + .unwrap(); + } + #[tokio::test] async fn test_register() { let device_plugins_dirs = Builder::new().prefix("device-plugins").tempdir().unwrap(); - let kubelet_socket = device_plugins_dirs - .path() - .join("kubelet.sock") - .to_str() - .unwrap() - .to_string(); + let kubelet_socket = device_plugins_dirs.path().join("kubelet.sock"); + let kubelet_socket_clone = kubelet_socket.clone(); + let kubelet_socket_str = kubelet_socket_clone.to_str().unwrap(); // Start kubelet registration server - let mut uds = - UnixListener::bind(kubelet_socket.clone()).expect("Failed to bind to socket path"); - let registration = MockRegistration { return_error: false, }; let service = RegistrationServer::new(registration); task::spawn(async move { - Server::builder() - .add_service(service) - .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) - .await - .unwrap(); + serve_for_test(service, kubelet_socket).await; }); // Make sure 
registration server has started - akri_shared::uds::unix_stream::try_connect(&kubelet_socket) + akri_shared::uds::unix_stream::try_connect(kubelet_socket_str) .await .unwrap(); @@ -296,7 +313,7 @@ pub mod tests { "socket.sock", "random_instance", server_ender_sender, - &kubelet_socket + kubelet_socket_str ) .await .is_ok()); @@ -307,12 +324,9 @@ pub mod tests { let device_plugin_builder = DevicePluginBuilder {}; let (server_ender_sender, mut server_ender_receiver) = mpsc::channel(1); let device_plugins_dirs = Builder::new().prefix("device-plugins").tempdir().unwrap(); - let kubelet_socket = device_plugins_dirs - .path() - .join("kubelet.sock") - .to_str() - .unwrap() - .to_string(); + let kubelet_socket = device_plugins_dirs.path().join("kubelet.sock"); + let kubelet_socket_clone = kubelet_socket.clone(); + let kubelet_socket_str = kubelet_socket_clone.to_str().unwrap(); // Try to register when no registration service exists assert!(device_plugin_builder @@ -321,26 +335,20 @@ pub mod tests { "socket.sock", "random_instance", server_ender_sender.clone(), - &kubelet_socket + kubelet_socket_str ) .await .is_err()); // Start kubelet registration server - let mut uds = - UnixListener::bind(kubelet_socket.clone()).expect("Failed to bind to socket path"); let registration = MockRegistration { return_error: true }; let service = RegistrationServer::new(registration); task::spawn(async move { - Server::builder() - .add_service(service) - .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) - .await - .unwrap(); + serve_for_test(service, kubelet_socket).await; }); // Make sure registration server has started - akri_shared::uds::unix_stream::try_connect(&kubelet_socket) + akri_shared::uds::unix_stream::try_connect(&kubelet_socket_str) .await .unwrap(); @@ -351,7 +359,7 @@ pub mod tests { "socket.sock", "random_instance", server_ender_sender, - &kubelet_socket + kubelet_socket_str ) .await .is_ok()); diff --git a/agent/src/util/device_plugin_service.rs 
b/agent/src/util/device_plugin_service.rs index c0566f702..f8db0d7b4 100644 --- a/agent/src/util/device_plugin_service.rs +++ b/agent/src/util/device_plugin_service.rs @@ -10,8 +10,8 @@ use super::v1beta1::{ use akri_discovery_utils::discovery::v0::Device; use akri_shared::{ akri::{ - configuration::Configuration, - instance::Instance, + configuration::ConfigurationSpec, + instance::InstanceSpec, retry::{random_delay, MAX_INSTANCE_UPDATE_TRIES}, AKRI_SLOT_ANNOTATION_NAME, }, @@ -28,6 +28,7 @@ use tokio::{ sync::{broadcast, mpsc, Mutex}, time::timeout, }; +use tokio_stream::wrappers::ReceiverStream; use tonic::{Code, Request, Response, Status}; /// Message sent in channel to `list_and_watch`. @@ -75,7 +76,7 @@ pub struct DevicePluginService { /// Socket endpoint pub endpoint: String, /// Instance's Configuration - pub config: Configuration, + pub config: ConfigurationSpec, /// Name of Instance's Configuration CRD pub config_name: String, /// UID of Instance's Configuration CRD @@ -115,7 +116,7 @@ impl DevicePlugin for DevicePluginService { Ok(Response::new(resp)) } - type ListAndWatchStream = mpsc::Receiver>; + type ListAndWatchStream = ReceiverStream>; /// Called by Kubelet right after the DevicePluginService registers with Kubelet. /// Returns a stream of List of "virtual" Devices over a channel. 
@@ -136,13 +137,13 @@ impl DevicePlugin for DevicePluginService { let mut list_and_watch_message_receiver = self.list_and_watch_message_sender.subscribe(); // Create a channel that list_and_watch can periodically send updates to kubelet on - let (mut kubelet_update_sender, kubelet_update_receiver) = + let (kubelet_update_sender, kubelet_update_receiver) = mpsc::channel(KUBELET_UPDATE_CHANNEL_CAPACITY); // Spawn thread so can send kubelet the receiving end of the channel to listen on tokio::spawn(async move { let mut keep_looping = true; #[cfg(not(test))] - let kube_interface = Arc::new(k8s::create_kube_interface()); + let kube_interface = Arc::new(k8s::KubeImpl::new().await.unwrap()); // Try to create an Instance CRD for this plugin and add it to the global InstanceMap else shutdown #[cfg(not(test))] @@ -229,7 +230,8 @@ impl DevicePlugin for DevicePluginService { } trace!("list_and_watch - for Instance {} ending", dps.instance_name); }); - Ok(Response::new(kubelet_update_receiver)) + + Ok(Response::new(ReceiverStream::new(kubelet_update_receiver))) } /// Kubelet calls allocate during pod creation. 
@@ -243,7 +245,7 @@ impl DevicePlugin for DevicePluginService { "allocate - kubelet called allocate for Instance {}", self.instance_name ); - let kube_interface = Arc::new(k8s::create_kube_interface()); + let kube_interface = Arc::new(k8s::KubeImpl::new().await.unwrap()); match self.internal_allocate(requests, kube_interface).await { Ok(resp) => Ok(resp), Err(e) => Err(e), @@ -349,7 +351,7 @@ impl DevicePluginService { fn get_slot_value( device_usage_id: &str, node_name: &str, - instance: &Instance, + instance: &InstanceSpec, ) -> Result { if let Some(allocated_node) = instance.device_usage.get(device_usage_id) { if allocated_node.is_empty() { @@ -386,11 +388,11 @@ async fn try_update_instance_device_usage( instance_namespace: &str, kube_interface: Arc, ) -> Result<(), Status> { - let mut instance: Instance; + let mut instance: InstanceSpec; for x in 0..MAX_INSTANCE_UPDATE_TRIES { // Grab latest instance match kube_interface - .find_instance(&instance_name, &instance_namespace) + .find_instance(instance_name, instance_namespace) .await { Ok(instance_object) => instance = instance_object.spec, @@ -418,7 +420,7 @@ async fn try_update_instance_device_usage( .insert(device_usage_id.to_string(), value.clone()); match kube_interface - .update_instance(&instance, &instance_name, &instance_namespace) + .update_instance(&instance, instance_name, instance_namespace) .await { Ok(()) => { @@ -484,7 +486,7 @@ fn build_container_allocate_response( async fn try_create_instance( dps: Arc, kube_interface: Arc, -) -> Result<(), Box> { +) -> Result<(), anyhow::Error> { // Make sure Configuration exists for instance if let Err(e) = kube_interface .find_configuration(&dps.config_name, &dps.config_namespace) @@ -500,7 +502,7 @@ async fn try_create_instance( let device_usage: std::collections::HashMap = (0..dps.config.capacity) .map(|x| (format!("{}-{}", dps.instance_name, x), "".to_string())) .collect(); - let instance = Instance { + let instance = InstanceSpec { configuration_name: 
dps.config_name.clone(), shared: dps.shared, nodes: vec![dps.node_name.clone()], @@ -530,7 +532,7 @@ async fn try_create_instance( match kube_interface .update_instance( &instance_object.spec, - &instance_object.metadata.name, + &instance_object.metadata.name.unwrap(), &dps.config_namespace, ) .await @@ -767,9 +769,9 @@ mod device_plugin_service_tests { v1beta1::device_plugin_client::DevicePluginClient, }; use super::*; - use akri_shared::akri::configuration::KubeAkriConfig; + use akri_shared::akri::configuration::Configuration; use akri_shared::{ - akri::instance::{Instance, KubeAkriInstance}, + akri::instance::{Instance, InstanceSpec}, k8s::MockKubeInterface, }; use std::{ @@ -815,11 +817,11 @@ mod device_plugin_service_tests { NodeName::ThisNode => "node-a", NodeName::OtherNode => "other", }; - instance_json = instance_json.replace("node-a", &host_name); + instance_json = instance_json.replace("node-a", host_name); instance_json = instance_json.replace("config-a-b494b6", &instance_name_clone); instance_json = instance_json.replace("\":\"\"", &format!("\":\"{}\"", device_usage_node)); - let instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let instance: Instance = serde_json::from_str(&instance_json).unwrap(); Ok(instance) }); } @@ -831,10 +833,9 @@ mod device_plugin_service_tests { let path_to_config = "../test/yaml/config-a.yaml"; let kube_akri_config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); - let kube_akri_config: KubeAkriConfig = - serde_yaml::from_str(&kube_akri_config_yaml).unwrap(); - let device_instance_name = - get_device_instance_name("b494b6", &kube_akri_config.metadata.name); + let kube_akri_config: Configuration = serde_yaml::from_str(&kube_akri_config_yaml).unwrap(); + let config_name = kube_akri_config.metadata.name.as_ref().unwrap(); + let device_instance_name = get_device_instance_name("b494b6", config_name); let unique_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH); let 
device_endpoint: String = format!( "{}-{}.sock", @@ -866,7 +867,7 @@ mod device_plugin_service_tests { instance_name: device_instance_name, endpoint: device_endpoint, config: kube_akri_config.spec.clone(), - config_name: kube_akri_config.metadata.name, + config_name: config_name.to_string(), config_uid: kube_akri_config.metadata.uid.unwrap(), config_namespace: kube_akri_config.metadata.namespace.unwrap(), shared: false, @@ -901,7 +902,7 @@ mod device_plugin_service_tests { fn get_kube_not_found_error() -> kube::Error { // Mock error thrown when instance not found - kube::Error::Api(kube::ErrorResponse { + kube::Error::Api(kube::error::ErrorResponse { status: "Failure".to_string(), message: "instances.akri.sh \"akri-blah-901a7b\" not found".to_string(), reason: "NotFound".to_string(), @@ -956,7 +957,7 @@ mod device_plugin_service_tests { let path_to_config = "../test/yaml/config-a.yaml"; let kube_akri_config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); - let kube_akri_config: KubeAkriConfig = + let kube_akri_config: Configuration = serde_yaml::from_str(&kube_akri_config_yaml).unwrap(); Ok(kube_akri_config) }); @@ -983,7 +984,7 @@ mod device_plugin_service_tests { .withf(move |name: &str, namespace: &str| { namespace == config_namespace && name == instance_name }) - .returning(move |_, _| Err(get_kube_not_found_error())); + .returning(move |_, _| Err(get_kube_not_found_error().into())); let instance_name = device_plugin_service.instance_name.clone(); let config_namespace = device_plugin_service.config_namespace.clone(); mock.expect_create_instance() @@ -1097,7 +1098,7 @@ mod device_plugin_service_tests { }) .returning(move |_, _| { let error = Error::new(ErrorKind::InvalidInput, "Configuration doesn't exist"); - Err(Box::new(error)) + Err(error.into()) }); assert!( try_create_instance(Arc::new(device_plugin_service), Arc::new(mock)) @@ -1127,7 +1128,7 @@ mod device_plugin_service_tests { .withf(move |name: &str, namespace: &str| { 
namespace == config_namespace && name == instance_name }) - .returning(move |_, _| Err(get_kube_not_found_error())); + .returning(move |_, _| Err(get_kube_not_found_error().into())); let instance_name = device_plugin_service.instance_name.clone(); let config_namespace = device_plugin_service.config_namespace.clone(); mock.expect_create_instance() @@ -1139,7 +1140,7 @@ mod device_plugin_service_tests { && owner_name == config_name && owner_uid == config_uid }) - .returning(move |_, _, _, _, _| Err(None.ok_or("failure")?)); + .returning(move |_, _, _, _, _| Err(anyhow::anyhow!("failure"))); let dps = Arc::new(device_plugin_service); assert!(try_create_instance(dps.clone(), Arc::new(mock)) @@ -1288,7 +1289,7 @@ mod device_plugin_service_tests { .withf(move |name: &str, namespace: &str| { namespace == instance_namespace && name == instance_name }) - .returning(move |_, _| Err(get_kube_not_found_error())); + .returning(move |_, _| Err(get_kube_not_found_error().into())); let devices = build_list_and_watch_response(Arc::new(device_plugin_service), Arc::new(mock)) .await @@ -1340,7 +1341,7 @@ mod device_plugin_service_tests { ); mock.expect_update_instance() .times(1) - .withf(move |instance_to_update: &Instance, _, _| { + .withf(move |instance_to_update: &InstanceSpec, _, _| { instance_to_update .device_usage .get(&device_usage_id_slot) diff --git a/agent/src/util/discovery_operator.rs b/agent/src/util/discovery_operator.rs index d3241d9ae..9a5154736 100644 --- a/agent/src/util/discovery_operator.rs +++ b/agent/src/util/discovery_operator.rs @@ -19,7 +19,7 @@ use akri_discovery_utils::discovery::v0::{ discovery_handler_client::DiscoveryHandlerClient, Device, DiscoverRequest, DiscoverResponse, }; use akri_shared::{ - akri::configuration::KubeAkriConfig, + akri::configuration::Configuration, k8s, os::env_var::{ActualEnvVarQuery, EnvVarQuery}, }; @@ -62,7 +62,7 @@ pub struct DiscoveryOperator { discovery_handler_map: RegisteredDiscoveryHandlerMap, /// The Akri Configuration 
associated with this `DiscoveryOperator`. /// The Configuration tells the `DiscoveryOperator` what to look for. - config: KubeAkriConfig, + config: Configuration, /// Map of Akri Instances discovered by this `DiscoveryOperator` instance_map: InstanceMap, } @@ -71,7 +71,7 @@ pub struct DiscoveryOperator { impl DiscoveryOperator { pub fn new( discovery_handler_map: RegisteredDiscoveryHandlerMap, - config: KubeAkriConfig, + config: Configuration, instance_map: InstanceMap, ) -> Self { DiscoveryOperator { @@ -87,7 +87,7 @@ impl DiscoveryOperator { } /// Returns config field. Allows the struct to be mocked. #[allow(dead_code)] - pub fn get_config(&self) -> KubeAkriConfig { + pub fn get_config(&self) -> Configuration { self.config.clone() } /// Returns instance_map field. Allows the struct to be mocked. @@ -125,9 +125,11 @@ impl DiscoveryOperator { self.config.spec.discovery_handler.name ); match discovery_handler.discover(discover_request).await { - Ok(device_update_receiver) => { - Some(StreamType::Embedded(device_update_receiver.into_inner())) - } + Ok(device_update_receiver) => Some(StreamType::Embedded( + // `discover` returns `Result, tonic::Status>` + // Get the `Receiver` from the `DiscoverStream` wrapper + device_update_receiver.into_inner().into_inner(), + )), Err(e) => { error!("get_stream - could not connect to DiscoveryHandler at endpoint {:?} with error {}", endpoint, e); None @@ -204,7 +206,7 @@ impl DiscoveryOperator { #[allow(dead_code)] pub async fn internal_do_discover<'a>( &'a self, - kube_interface: Arc>, + kube_interface: Arc, dh_details: &'a DiscoveryDetails, stream: &'a mut dyn StreamingExt, ) -> Result<(), Status> { @@ -295,10 +297,10 @@ impl DiscoveryOperator { /// the associated Device Plugin and Instance are terminated and deleted, respectively. 
pub async fn delete_offline_instances( &self, - kube_interface: Arc>, + kube_interface: Arc, ) -> Result<(), Box> { trace!( - "delete_offline_instances - entered for configuration {}", + "delete_offline_instances - entered for configuration {:?}", self.config.metadata.name ); let kube_interface_clone = kube_interface.clone(); @@ -318,7 +320,7 @@ impl DiscoveryOperator { .await .unwrap(); k8s::try_delete_instance( - (*kube_interface_clone).as_ref(), + kube_interface_clone.as_ref(), &instance, self.config.metadata.namespace.as_ref().unwrap(), ) @@ -335,26 +337,27 @@ impl DiscoveryOperator { /// of the instance or deletes it if it is a local device. pub async fn handle_discovery_results( &self, - kube_interface: Arc>, + kube_interface: Arc, discovery_results: Vec, shared: bool, device_plugin_builder: Box, ) -> Result<(), Box> { + let config_name = self.config.metadata.name.clone().unwrap(); trace!( "handle_discovery_results - for config {} with discovery results {:?}", - self.config.metadata.name, + config_name, discovery_results ); let currently_visible_instances: HashMap = discovery_results .iter() .map(|discovery_result| { let id = generate_instance_digest(&discovery_result.id, shared); - let instance_name = get_device_instance_name(&id, &self.config.metadata.name); + let instance_name = get_device_instance_name(&id, &config_name); (instance_name, discovery_result.clone()) }) .collect(); INSTANCE_COUNT_METRIC - .with_label_values(&[&self.config.metadata.name, &shared.to_string()]) + .with_label_values(&[&config_name, &shared.to_string()]) .set(currently_visible_instances.len() as i64); // Update the connectivity status of instances and return list of visible instances that don't have Instance CRs let instance_map = self.instance_map.lock().await.clone(); @@ -375,7 +378,7 @@ impl DiscoveryOperator { if !new_discovery_results.is_empty() { for discovery_result in new_discovery_results { let id = generate_instance_digest(&discovery_result.id, shared); - let 
instance_name = get_device_instance_name(&id, &self.config.metadata.name); + let instance_name = get_device_instance_name(&id, &config_name); trace!( "handle_discovery_results - new instance {} came online", instance_name @@ -406,7 +409,7 @@ impl DiscoveryOperator { /// (A) non-local Instance is still not visible after 5 minutes or (B) local instance is still not visible. pub async fn update_instance_connectivity_status( &self, - kube_interface: Arc>, + kube_interface: Arc, currently_visible_instances: HashMap, shared: bool, ) -> Result<(), Box> { @@ -495,7 +498,7 @@ impl DiscoveryOperator { .await .unwrap(); k8s::try_delete_instance( - (*kube_interface).as_ref(), + kube_interface.as_ref(), &instance, self.config.metadata.namespace.as_ref().unwrap(), ) @@ -534,14 +537,14 @@ pub mod start_discovery { new_discovery_handler_sender: broadcast::Sender, stop_all_discovery_sender: broadcast::Sender<()>, finished_all_discovery_sender: &mut mpsc::Sender<()>, - kube_interface: Arc>, + kube_interface: Arc, ) -> Result<(), Box> { let config = discovery_operator.get_config(); info!( "start_discovery - entered for {} discovery handler", config.spec.discovery_handler.name ); - let config_name = config.metadata.name.clone(); + let config_name = config.metadata.name.clone().unwrap(); let mut tasks = Vec::new(); let discovery_operator = Arc::new(discovery_operator); @@ -606,7 +609,7 @@ pub mod start_discovery { loop { tokio::select! 
{ _ = stop_all_discovery_receiver.recv() => { - trace!("listen_for_new_discovery_handlers - received message to stop discovery for configuration {}", discovery_operator.get_config().metadata.name); + trace!("listen_for_new_discovery_handlers - received message to stop discovery for configuration {:?}", discovery_operator.get_config().metadata.name); discovery_operator.stop_all_discovery().await; break; }, @@ -614,10 +617,10 @@ pub mod start_discovery { // Check if it is one of this Configuration's discovery handlers if let Ok(discovery_handler_name) = result { if discovery_handler_name == discovery_operator.get_config().spec.discovery_handler.name { - trace!("listen_for_new_discovery_handlers - received new registered discovery handler for configuration {}", discovery_operator.get_config().metadata.name); + trace!("listen_for_new_discovery_handlers - received new registered discovery handler for configuration {:?}", discovery_operator.get_config().metadata.name); let new_discovery_operator = discovery_operator.clone(); discovery_tasks.push(tokio::spawn(async move { - do_discover(new_discovery_operator, Arc::new(Box::new(k8s::create_kube_interface()))).await.unwrap(); + do_discover(new_discovery_operator, Arc::new(k8s::KubeImpl::new().await.unwrap())).await.unwrap(); })); } } @@ -641,7 +644,7 @@ pub mod start_discovery { /// Removes the discovery handler from the `RegisteredDiscoveryHandlerMap` if it has been offline for longer than the grace period. 
pub async fn do_discover( discovery_operator: Arc, - kube_interface: Arc>, + kube_interface: Arc, ) -> Result<(), Box> { let mut discovery_tasks = Vec::new(); let config = discovery_operator.get_config(); @@ -697,17 +700,17 @@ pub mod start_discovery { /// Try to connect to discovery handler until connection has been established or grace period has passed async fn do_discover_on_discovery_handler<'a>( discovery_operator: Arc, - kube_interface: Arc>, + kube_interface: Arc, endpoint: &'a DiscoveryHandlerEndpoint, dh_details: &'a DiscoveryDetails, ) -> Result<(), Box> { loop { let deregistered; - match discovery_operator.get_stream(&endpoint).await { + match discovery_operator.get_stream(endpoint).await { Some(stream_type) => { // Since connection was established, be sure that the Discovery Handler is marked as having a client discovery_operator.set_discovery_handler_connectivity_status( - &endpoint, + endpoint, DiscoveryHandlerStatus::Active, ); match stream_type { @@ -715,14 +718,14 @@ pub mod start_discovery { match discovery_operator .internal_do_discover( kube_interface.clone(), - &dh_details, + dh_details, &mut stream, ) .await { Ok(_) => { discovery_operator.set_discovery_handler_connectivity_status( - &endpoint, + endpoint, DiscoveryHandlerStatus::Waiting, ); break; @@ -739,7 +742,7 @@ pub mod start_discovery { ) .await?; deregistered = discovery_operator - .mark_offline_or_deregister_discovery_handler(&endpoint) + .mark_offline_or_deregister_discovery_handler(endpoint) .await .unwrap(); } else { @@ -755,7 +758,7 @@ pub mod start_discovery { .await?; discovery_operator .set_discovery_handler_connectivity_status( - &endpoint, + endpoint, DiscoveryHandlerStatus::Waiting, ); break; @@ -767,13 +770,13 @@ pub mod start_discovery { discovery_operator .internal_do_discover( kube_interface.clone(), - &dh_details, + dh_details, &mut stream, ) .await .unwrap(); discovery_operator.set_discovery_handler_connectivity_status( - &endpoint, + endpoint, 
DiscoveryHandlerStatus::Waiting, ); break; @@ -782,7 +785,7 @@ pub mod start_discovery { } None => { deregistered = discovery_operator - .mark_offline_or_deregister_discovery_handler(&endpoint) + .mark_offline_or_deregister_discovery_handler(endpoint) .await .unwrap(); } @@ -861,7 +864,7 @@ pub mod tests { use super::*; use akri_discovery_utils::discovery::mock_discovery_handler; use akri_shared::{ - akri::configuration::KubeAkriConfig, k8s::MockKubeInterface, os::env_var::MockEnvVarQuery, + akri::configuration::Configuration, k8s::MockKubeInterface, os::env_var::MockEnvVarQuery, }; use mock_instant::{Instant, MockClock}; use mockall::Sequence; @@ -869,7 +872,7 @@ pub mod tests { use tokio::sync::broadcast; pub async fn build_instance_map( - config: &KubeAkriConfig, + config: &Configuration, visible_discovery_results: &mut Vec, list_and_watch_message_receivers: &mut Vec< broadcast::Receiver, @@ -894,7 +897,7 @@ pub mod tests { discovery_results, list_and_watch_message_receivers, connectivity_status, - &config.metadata.name, + config.metadata.name.as_ref().unwrap(), ) } @@ -913,7 +916,7 @@ pub mod tests { let (list_and_watch_message_sender, list_and_watch_message_receiver) = broadcast::channel(2); list_and_watch_message_receivers.push(list_and_watch_message_receiver); - let instance_name = get_device_instance_name(&device.id, &config_name); + let instance_name = get_device_instance_name(&device.id, config_name); ( instance_name, InstanceInfo { @@ -928,7 +931,7 @@ pub mod tests { fn create_mock_discovery_operator( discovery_handler_map: RegisteredDiscoveryHandlerMap, - config: KubeAkriConfig, + config: Configuration, instance_map: InstanceMap, ) -> MockDiscoveryOperator { let ctx = MockDiscoveryOperator::new_context(); @@ -946,8 +949,7 @@ pub mod tests { .returning(move || instance_map_clone.clone()); mock }); - let mock = MockDiscoveryOperator::new(discovery_handler_map, config, instance_map); - mock + MockDiscoveryOperator::new(discovery_handler_map, config, 
instance_map) } // Creates a discovery handler with specified properties and adds it to the RegisteredDiscoveryHandlerMap. @@ -987,7 +989,7 @@ pub mod tests { name: name.to_string(), endpoint, shared, - close_discovery_handler_connection: close_discovery_handler_connection.clone(), + close_discovery_handler_connection: close_discovery_handler_connection, connectivity_status: DiscoveryHandlerStatus::Waiting, } } @@ -1004,7 +1006,7 @@ pub mod tests { // Build discovery operator let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); let discovery_operator = create_mock_discovery_operator( discovery_handler_map.clone(), config, @@ -1064,7 +1066,7 @@ pub mod tests { .subscribe(); let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); let discovery_operator = Arc::new(DiscoveryOperator::new( discovery_handler_map, config, @@ -1114,8 +1116,7 @@ pub mod tests { .get(&DiscoveryHandlerEndpoint::Uds("socket.sock".to_string())) .unwrap() .clone() - .close_discovery_handler_connection - .clone(); + .close_discovery_handler_connection; mock_discovery_operator .expect_stop_all_discovery() .times(1) @@ -1128,8 +1129,7 @@ pub mod tests { tokio::sync::mpsc::channel(2); let thread_new_dh_sender = new_dh_sender.clone(); let thread_stop_all_discovery_sender = stop_all_discovery_sender.clone(); - let mock_kube_interface: Arc> = - Arc::new(Box::new(MockKubeInterface::new())); + let mock_kube_interface: Arc = Arc::new(MockKubeInterface::new()); let handle = tokio::spawn(async move { start_discovery::start_discovery( 
mock_discovery_operator, @@ -1211,8 +1211,7 @@ pub mod tests { .times(1) .returning(|_, _| ()) .in_sequence(&mut discovery_handler_status_seq); - let mock_kube_interface: Arc> = - Arc::new(Box::new(MockKubeInterface::new())); + let mock_kube_interface: Arc = Arc::new(MockKubeInterface::new()); start_discovery::do_discover(Arc::new(mock_discovery_operator), mock_kube_interface) .await .unwrap(); @@ -1223,14 +1222,13 @@ pub mod tests { let _ = env_logger::builder().is_test(true).try_init(); // Set node name for generating instance id std::env::set_var("AGENT_NODE_NAME", "node-a"); - let mock_kube_interface: Arc> = - Arc::new(Box::new(MockKubeInterface::new())); + let mock_kube_interface: Arc = Arc::new(MockKubeInterface::new()); let discovery_handler_map: RegisteredDiscoveryHandlerMap = Arc::new(std::sync::Mutex::new(HashMap::new())); let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); - let config_name = config.metadata.name.clone(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); + let config_name = config.metadata.name.clone().unwrap(); INSTANCE_COUNT_METRIC .with_label_values(&[&config_name, "true"]) .set(0); @@ -1290,7 +1288,7 @@ pub mod tests { for _x in 0..tries { println!("try number {}", _x); keep_looping = false; - tokio::time::delay_for(Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let unwrapped_instance_map = instance_map.lock().await.clone(); if check_empty && unwrapped_instance_map.is_empty() { map_is_empty = true; @@ -1324,7 +1322,7 @@ pub mod tests { let _ = env_logger::builder().is_test(true).try_init(); let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + 
let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); let mut list_and_watch_message_receivers = Vec::new(); let discovery_handler_map: RegisteredDiscoveryHandlerMap = Arc::new(std::sync::Mutex::new(HashMap::new())); @@ -1345,7 +1343,7 @@ pub mod tests { instance_map, )); discovery_operator - .delete_offline_instances(Arc::new(Box::new(mock))) + .delete_offline_instances(Arc::new(mock)) .await .unwrap(); @@ -1366,7 +1364,7 @@ pub mod tests { instance_map, )); discovery_operator - .delete_offline_instances(Arc::new(Box::new(mock))) + .delete_offline_instances(Arc::new(mock)) .await .unwrap(); @@ -1390,7 +1388,7 @@ pub mod tests { instance_map.clone(), )); discovery_operator - .delete_offline_instances(Arc::new(Box::new(mock))) + .delete_offline_instances(Arc::new(mock)) .await .unwrap(); // Make sure all instances are deleted from map. Note, first 3 arguments are ignored. @@ -1401,13 +1399,13 @@ pub mod tests { // 1: InstanceConnectivityStatus of all instances that go offline is changed from Online to Offline // 2: InstanceConnectivityStatus of shared instances that come back online in under 5 minutes is changed from Offline to Online // 3: InstanceConnectivityStatus of unshared instances that come back online before next periodic discovery is changed from Offline to Online - #[tokio::test(core_threads = 2)] + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_update_instance_connectivity_status_factory() { let _ = env_logger::builder().is_test(true).try_init(); let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); - let config_name = config.metadata.name.clone(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); + let config_name = config.metadata.name.clone().unwrap(); let mut list_and_watch_message_receivers = Vec::new(); let mut 
visible_discovery_results = Vec::new(); let discovery_handler_map: RegisteredDiscoveryHandlerMap = @@ -1554,7 +1552,7 @@ pub mod tests { } async fn run_update_instance_connectivity_status( - config: KubeAkriConfig, + config: Configuration, currently_visible_instances: HashMap, shared: bool, instance_map: InstanceMap, @@ -1568,7 +1566,7 @@ pub mod tests { )); discovery_operator .update_instance_connectivity_status( - Arc::new(Box::new(mock)), + Arc::new(mock), currently_visible_instances, shared, ) @@ -1582,7 +1580,7 @@ pub mod tests { ) -> DiscoveryOperator { let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); let discovery_handler_map = Arc::new(std::sync::Mutex::new(HashMap::new())); add_discovery_handler_to_map(dh_name, endpoint, false, discovery_handler_map.clone()); DiscoveryOperator::new( @@ -1624,13 +1622,10 @@ pub mod tests { let endpoint = DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()); let discovery_operator = setup_non_mocked_dh(discovery_handler_name, &endpoint); // Test that an online discovery handler is marked offline - assert_eq!( - discovery_operator - .mark_offline_or_deregister_discovery_handler(&endpoint) - .await - .unwrap(), - false - ); + assert!(!discovery_operator + .mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap()); if let DiscoveryHandlerStatus::Offline(_) = discovery_operator .discovery_handler_map .lock() @@ -1648,13 +1643,10 @@ pub mod tests { panic!("DiscoveryHandlerStatus should be changed to offline"); } // Test that an offline discovery handler IS NOT deregistered if the time has not passed - assert_eq!( - discovery_operator - .mark_offline_or_deregister_discovery_handler(&endpoint) - .await - .unwrap(), - false - ); + assert!(!discovery_operator + 
.mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap()); // Test that an offline discovery handler IS deregistered if the time has passed let mock_now = Instant::now(); @@ -1668,13 +1660,10 @@ pub mod tests { .get_mut(&endpoint) .unwrap() .connectivity_status = DiscoveryHandlerStatus::Offline(mock_now); - assert_eq!( - discovery_operator - .mark_offline_or_deregister_discovery_handler(&endpoint) - .await - .unwrap(), - true - ); + assert!(discovery_operator + .mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap()); } #[tokio::test] @@ -1683,7 +1672,7 @@ pub mod tests { std::env::set_var(super::super::constants::ENABLE_DEBUG_ECHO_LABEL, "yes"); let path_to_config = "../test/yaml/config-a.yaml"; let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config: Configuration = serde_yaml::from_str(&config_yaml).unwrap(); let discovery_handler_map = Arc::new(std::sync::Mutex::new(HashMap::new())); let endpoint = DiscoveryHandlerEndpoint::Embedded; let dh_name = akri_debug_echo::DISCOVERY_HANDLER_NAME.to_string(); @@ -1720,7 +1709,7 @@ pub mod tests { ) .await; // Make sure registration server has started - akri_shared::uds::unix_stream::try_connect(&endpoint) + akri_shared::uds::unix_stream::try_connect(endpoint) .await .unwrap(); discovery_operator diff --git a/agent/src/util/embedded_discovery_handlers.rs b/agent/src/util/embedded_discovery_handlers.rs index 2e0427fd5..541781f6e 100644 --- a/agent/src/util/embedded_discovery_handlers.rs +++ b/agent/src/util/embedded_discovery_handlers.rs @@ -93,7 +93,7 @@ mod tests { discoveryDetails: |+ udevRules: [] "#; - let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&udev_yaml).unwrap(); + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(udev_yaml).unwrap(); assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); let yaml = r#" 
@@ -102,7 +102,7 @@ mod tests { opcuaDiscoveryMethod: standard: {} "#; - let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&yaml).unwrap(); + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(yaml).unwrap(); assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); let deserialized = serde_json::from_str::( @@ -121,7 +121,7 @@ mod tests { descriptions: - "foo1" "#; - let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&debug_echo_yaml).unwrap(); + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(debug_echo_yaml).unwrap(); // Test that errors without environment var set let mut mock_query_without_var_set = MockEnvVarQuery::new(); mock_query_without_var_set diff --git a/agent/src/util/registration.rs b/agent/src/util/registration.rs index a70cdd109..059e922bc 100644 --- a/agent/src/util/registration.rs +++ b/agent/src/util/registration.rs @@ -1,16 +1,15 @@ -use super::constants::{ - CLOSE_DISCOVERY_HANDLER_CONNECTION_CHANNEL_CAPACITY, ENABLE_DEBUG_ECHO_LABEL, -}; +use super::constants::CLOSE_DISCOVERY_HANDLER_CONNECTION_CHANNEL_CAPACITY; +#[cfg(any(test, feature = "agent-full"))] +use super::constants::ENABLE_DEBUG_ECHO_LABEL; use akri_discovery_utils::discovery::v0::{ register_discovery_handler_request::EndpointType, registration_server::{Registration, RegistrationServer}, Empty, RegisterDiscoveryHandlerRequest, }; -use akri_shared::{ - os::env_var::{ActualEnvVarQuery, EnvVarQuery}, - uds::unix_stream, -}; -use futures::TryStreamExt; +#[cfg(any(test, feature = "agent-full"))] +use akri_shared::os::env_var::{ActualEnvVarQuery, EnvVarQuery}; +use akri_shared::uds::unix_stream; +use futures::TryFutureExt; #[cfg(test)] use mock_instant::Instant; use std::collections::HashMap; @@ -199,11 +198,19 @@ pub async fn internal_run_registration_server( ); // Delete socket in case previously created/used std::fs::remove_file(&socket_path).unwrap_or(()); - let mut uds = - 
tokio::net::UnixListener::bind(socket_path).expect("Failed to bind to socket path"); + let incoming = { + let uds = + tokio::net::UnixListener::bind(socket_path).expect("Failed to bind to socket path"); + + async_stream::stream! { + while let item = uds.accept().map_ok(|(st, _)| unix_stream::UnixStream(st)).await { + yield item; + } + } + }; Server::builder() .add_service(RegistrationServer::new(registration)) - .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) + .serve_with_incoming(incoming) .await?; trace!( "internal_run_registration_server - gracefully shutdown ... deleting socket {}", diff --git a/agent/src/util/slot_reconciliation.rs b/agent/src/util/slot_reconciliation.rs index c59600a16..22b009b5c 100644 --- a/agent/src/util/slot_reconciliation.rs +++ b/agent/src/util/slot_reconciliation.rs @@ -1,5 +1,5 @@ use super::{constants::SLOT_RECONCILIATION_CHECK_DELAY_SECS, crictl_containers}; -use akri_shared::{akri::instance::Instance, k8s::KubeInterface}; +use akri_shared::{akri::instance::InstanceSpec, k8s::KubeInterface}; use async_trait::async_trait; use k8s_openapi::api::core::v1::PodStatus; #[cfg(test)] @@ -249,7 +249,7 @@ impl DevicePluginSlotReconciler { ) }) .collect::>(); - let modified_instance = Instance { + let modified_instance = InstanceSpec { configuration_name: instance.spec.configuration_name.clone(), broker_properties: instance.spec.broker_properties.clone(), shared: instance.spec.shared, @@ -261,7 +261,7 @@ impl DevicePluginSlotReconciler { match kube_interface .update_instance( &modified_instance, - &instance.metadata.name, + &instance.metadata.name.unwrap(), &instance.metadata.namespace.unwrap(), ) .await @@ -311,7 +311,7 @@ pub async fn periodic_slot_reconciliation( slot_grace_period: std::time::Duration, ) -> Result<(), Box> { trace!("periodic_slot_reconciliation - start"); - let kube_interface = akri_shared::k8s::create_kube_interface(); + let kube_interface = akri_shared::k8s::KubeImpl::new().await?; let node_name = 
std::env::var("AGENT_NODE_NAME").unwrap(); let crictl_path = std::env::var("HOST_CRICTL_PATH").unwrap(); let runtime_endpoint = std::env::var("HOST_RUNTIME_ENDPOINT").unwrap(); @@ -327,8 +327,8 @@ pub async fn periodic_slot_reconciliation( }; loop { - trace!("periodic_slot_reconciliation - iteration pre delay_for"); - tokio::time::delay_for(std::time::Duration::from_secs( + trace!("periodic_slot_reconciliation - iteration pre sleep"); + tokio::time::sleep(std::time::Duration::from_secs( SLOT_RECONCILIATION_CHECK_DELAY_SECS, )) .await; @@ -345,9 +345,9 @@ pub async fn periodic_slot_reconciliation( #[cfg(test)] mod reconcile_tests { use super::*; - use akri_shared::{akri::instance::KubeAkriInstanceList, k8s::MockKubeInterface, os::file}; - use k8s_openapi::api::core::v1::{PodSpec, PodStatus}; - use kube::api::{Object, ObjectList}; + use akri_shared::{akri::instance::InstanceList, k8s::MockKubeInterface, os::file}; + use k8s_openapi::api::core::v1::Pod; + use kube::api::ObjectList; fn configure_get_node_slots(mock: &mut MockSlotQuery, result: HashSet, error: bool) { mock.expect_get_node_slots().times(1).returning(move || { @@ -362,8 +362,7 @@ mod reconcile_tests { fn configure_get_instances(mock: &mut MockKubeInterface, result_file: &'static str) { mock.expect_get_instances().times(1).returning(move || { let instance_list_json = file::read_file_to_string(result_file); - let instance_list: KubeAkriInstanceList = - serde_json::from_str(&instance_list_json).unwrap(); + let instance_list: InstanceList = serde_json::from_str(&instance_list_json).unwrap(); Ok(instance_list) }); } @@ -378,8 +377,7 @@ mod reconcile_tests { .withf(move |s| s == selector) .returning(move |_| { let pods_json = file::read_file_to_string(result_file); - let pods: ObjectList> = - serde_json::from_str(&pods_json).unwrap(); + let pods: ObjectList = serde_json::from_str(&pods_json).unwrap(); Ok(pods) }); } diff --git a/agent/src/util/v1beta1.rs b/agent/src/util/v1beta1.rs index 2b179f098..6cea16015 
100644 --- a/agent/src/util/v1beta1.rs +++ b/agent/src/util/v1beta1.rs @@ -8,17 +8,17 @@ pub struct DevicePluginOptions { pub struct RegisterRequest { /// Version of the API the Device Plugin was built against #[prost(string, tag = "1")] - pub version: std::string::String, + pub version: ::prost::alloc::string::String, /// Name of the unix socket the device plugin is listening on /// PATH = path.Join(DevicePluginPath, endpoint) #[prost(string, tag = "2")] - pub endpoint: std::string::String, + pub endpoint: ::prost::alloc::string::String, /// Schedulable resource name. As of now it's expected to be a DNS Label #[prost(string, tag = "3")] - pub resource_name: std::string::String, + pub resource_name: ::prost::alloc::string::String, /// Options to be communicated with Device Manager #[prost(message, optional, tag = "4")] - pub options: ::std::option::Option, + pub options: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Empty {} @@ -28,7 +28,7 @@ pub struct Empty {} #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListAndWatchResponse { #[prost(message, repeated, tag = "1")] - pub devices: ::std::vec::Vec, + pub devices: ::prost::alloc::vec::Vec, } /// E.g: /// struct Device { @@ -41,10 +41,10 @@ pub struct Device { /// to identify devices during the communication /// Max length of this field is 63 characters #[prost(string, tag = "1")] - pub id: std::string::String, + pub id: ::prost::alloc::string::String, /// Health of the device, can be healthy or unhealthy, see constants.go #[prost(string, tag = "2")] - pub health: std::string::String, + pub health: ::prost::alloc::string::String, } /// - PreStartContainer is expected to be called before each container start if indicated by plugin during registration phase. /// - PreStartContainer allows kubelet to pass reinitialized devices to containers. 
@@ -53,7 +53,7 @@ pub struct Device { #[derive(Clone, PartialEq, ::prost::Message)] pub struct PreStartContainerRequest { #[prost(string, repeated, tag = "1")] - pub devices_i_ds: ::std::vec::Vec, + pub devices_i_ds: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// PreStartContainerResponse will be send by plugin in response to PreStartContainerRequest #[derive(Clone, PartialEq, ::prost::Message)] @@ -67,12 +67,12 @@ pub struct PreStartContainerResponse {} #[derive(Clone, PartialEq, ::prost::Message)] pub struct AllocateRequest { #[prost(message, repeated, tag = "1")] - pub container_requests: ::std::vec::Vec, + pub container_requests: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ContainerAllocateRequest { #[prost(string, repeated, tag = "1")] - pub devices_i_ds: ::std::vec::Vec, + pub devices_i_ds: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// AllocateResponse includes the artifacts that needs to be injected into /// a container for accessing 'deviceIDs' that were mentioned as part of @@ -85,22 +85,24 @@ pub struct ContainerAllocateRequest { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AllocateResponse { #[prost(message, repeated, tag = "1")] - pub container_responses: ::std::vec::Vec, + pub container_responses: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ContainerAllocateResponse { /// List of environment variable to be set in the container to access one of more devices. #[prost(map = "string, string", tag = "1")] - pub envs: ::std::collections::HashMap, + pub envs: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// Mounts for the container. #[prost(message, repeated, tag = "2")] - pub mounts: ::std::vec::Vec, + pub mounts: ::prost::alloc::vec::Vec, /// Devices for the container. 
#[prost(message, repeated, tag = "3")] - pub devices: ::std::vec::Vec, + pub devices: ::prost::alloc::vec::Vec, /// Container annotations to pass to the container runtime #[prost(map = "string, string", tag = "4")] - pub annotations: ::std::collections::HashMap, + pub annotations: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } /// Mount specifies a host volume to mount into a container. /// where device library or tools are installed on host and container @@ -108,10 +110,10 @@ pub struct ContainerAllocateResponse { pub struct Mount { /// Path of the mount within the container. #[prost(string, tag = "1")] - pub container_path: std::string::String, + pub container_path: ::prost::alloc::string::String, /// Path of the mount on the host. #[prost(string, tag = "2")] - pub host_path: std::string::String, + pub host_path: ::prost::alloc::string::String, /// If set, the mount is read-only. #[prost(bool, tag = "3")] pub read_only: bool, @@ -121,20 +123,20 @@ pub struct Mount { pub struct DeviceSpec { /// Path of the device within the container. #[prost(string, tag = "1")] - pub container_path: std::string::String, + pub container_path: ::prost::alloc::string::String, /// Path of the device on the host. #[prost(string, tag = "2")] - pub host_path: std::string::String, + pub host_path: ::prost::alloc::string::String, /// Cgroups permissions of the device, candidates are one or more of /// * r - allows container to read from the specified device. /// * w - allows container to write to the specified device. /// * m - allows container to create device files that do not yet exist. 
#[prost(string, tag = "3")] - pub permissions: std::string::String, + pub permissions: ::prost::alloc::string::String, } #[doc = r" Generated client implementations."] pub mod registration_client { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " Registration is the service advertised by the Kubelet"] #[doc = " Only when Kubelet answers with a success code to a Register Request"] @@ -142,6 +144,7 @@ pub mod registration_client { #[doc = " Registration may fail when device plugin version is not supported by"] #[doc = " Kubelet or the registered resourceName is already taken by another"] #[doc = " active device plugin. Device plugin is expected to terminate upon registration failure"] + #[derive(Debug, Clone)] pub struct RegistrationClient { inner: tonic::client::Grpc, } @@ -159,17 +162,43 @@ pub mod registration_client { impl RegistrationClient where T: tonic::client::GrpcService, - T::ResponseBody: Body + HttpBody + Send + 'static, + T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into, - ::Error: Into + Send, + ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); - Self { inner } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> RegistrationClient> + where + F: tonic::service::Interceptor, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + RegistrationClient::new(InterceptedService::new(inner, interceptor)) + } + #[doc = r" Compress requests with `gzip`."] + #[doc = r""] + #[doc = r" This requires the server to support it otherwise it might respond with an"] + #[doc = r" error."] + pub fn send_gzip(mut self) -> Self { + 
self.inner = self.inner.send_gzip(); + self + } + #[doc = r" Enable decompressing responses with `gzip`."] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self } pub async fn register( &mut self, @@ -186,19 +215,13 @@ pub mod registration_client { self.inner.unary(request.into_request(), path, codec).await } } - impl Clone for RegistrationClient { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } - } } #[doc = r" Generated client implementations."] pub mod device_plugin_client { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " DevicePlugin is the service advertised by Device Plugins"] + #[derive(Debug, Clone)] pub struct DevicePluginClient { inner: tonic::client::Grpc, } @@ -216,17 +239,43 @@ pub mod device_plugin_client { impl DevicePluginClient where T: tonic::client::GrpcService, - T::ResponseBody: Body + HttpBody + Send + 'static, + T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into, - ::Error: Into + Send, + ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); - Self { inner } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DevicePluginClient> + where + F: tonic::service::Interceptor, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + DevicePluginClient::new(InterceptedService::new(inner, interceptor)) + } + #[doc = r" Compress requests with `gzip`."] + #[doc = r""] + #[doc = r" This requires the server to support it otherwise it might respond with an"] + #[doc = r" error."] + pub fn send_gzip(mut self) -> Self { + self.inner = self.inner.send_gzip(); + self + } 
+ #[doc = r" Enable decompressing responses with `gzip`."] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self } #[doc = " GetDevicePluginOptions returns options to be communicated with Device"] #[doc = " Manager"] @@ -304,17 +353,10 @@ pub mod device_plugin_client { self.inner.unary(request.into_request(), path, codec).await } } - impl Clone for DevicePluginClient { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } - } } #[doc = r" Generated server implementations."] pub mod registration_server { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = "Generated trait containing gRPC methods that should be implemented for use with RegistrationServer."] #[async_trait] @@ -331,34 +373,46 @@ pub mod registration_server { #[doc = " Kubelet or the registered resourceName is already taken by another"] #[doc = " active device plugin. 
Device plugin is expected to terminate upon registration failure"] #[derive(Debug)] - #[doc(hidden)] pub struct RegistrationServer { inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), } - struct _Inner(Arc, Option); + struct _Inner(Arc); impl RegistrationServer { pub fn new(inner: T) -> Self { let inner = Arc::new(inner); - let inner = _Inner(inner, None); - Self { inner } + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = Arc::new(inner); - let inner = _Inner(inner, Some(interceptor.into())); - Self { inner } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) } } - impl Service> for RegistrationServer { + impl tonic::codegen::Service> for RegistrationServer + where + T: Registration, + B: Body + Send + Sync + 'static, + B::Error: Into + Send + 'static, + { type Response = http::Response; type Error = Never; type Future = BoxFuture; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: http::Request) -> Self::Future { + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/v1beta1.Registration/Register" => { + #[allow(non_camel_case_types)] struct RegisterSvc(pub Arc); impl tonic::server::UnaryService for RegisterSvc { type Response = super::Empty; @@ -368,21 +422,21 @@ pub mod registration_server { request: tonic::Request, ) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.register(request).await }; + let fut = async move { (*inner).register(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = 
self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1.clone(); let inner = inner.0; let method = RegisterSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -392,7 +446,8 @@ pub mod registration_server { Ok(http::Response::builder() .status(200) .header("grpc-status", "12") - .body(tonic::body::BoxBody::empty()) + .header("content-type", "application/grpc") + .body(empty_body()) .unwrap()) }), } @@ -401,12 +456,16 @@ pub mod registration_server { impl Clone for RegistrationServer { fn clone(&self) -> Self { let inner = self.inner.clone(); - Self { inner } + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone(), self.1.clone()) + Self(self.0.clone()) } } impl std::fmt::Debug for _Inner { @@ -420,7 +479,7 @@ pub mod registration_server { } #[doc = r" Generated server implementations."] pub mod device_plugin_server { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = "Generated trait containing gRPC methods that should be implemented for use with DevicePluginServer."] #[async_trait] @@ -432,7 +491,7 @@ pub mod device_plugin_server { request: tonic::Request, ) -> Result, tonic::Status>; #[doc = "Server streaming response type for the ListAndWatch method."] - type ListAndWatchStream: Stream> + type ListAndWatchStream: futures_core::Stream> + Send + Sync + 
'static; @@ -460,61 +519,75 @@ pub mod device_plugin_server { } #[doc = " DevicePlugin is the service advertised by Device Plugins"] #[derive(Debug)] - #[doc(hidden)] pub struct DevicePluginServer { inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), } - struct _Inner(Arc, Option); + struct _Inner(Arc); impl DevicePluginServer { pub fn new(inner: T) -> Self { let inner = Arc::new(inner); - let inner = _Inner(inner, None); - Self { inner } + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = Arc::new(inner); - let inner = _Inner(inner, Some(interceptor.into())); - Self { inner } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) } } - impl Service> for DevicePluginServer { + impl tonic::codegen::Service> for DevicePluginServer + where + T: DevicePlugin, + B: Body + Send + Sync + 'static, + B::Error: Into + Send + 'static, + { type Response = http::Response; type Error = Never; type Future = BoxFuture; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: http::Request) -> Self::Future { + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/v1beta1.DevicePlugin/GetDevicePluginOptions" => { + #[allow(non_camel_case_types)] struct GetDevicePluginOptionsSvc(pub Arc); impl tonic::server::UnaryService for GetDevicePluginOptionsSvc { type Response = super::DevicePluginOptions; type Future = BoxFuture, tonic::Status>; fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.get_device_plugin_options(request).await }; + let fut = + async move { 
(*inner).get_device_plugin_options(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1.clone(); let inner = inner.0; let method = GetDevicePluginOptionsSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/v1beta1.DevicePlugin/ListAndWatch" => { + #[allow(non_camel_case_types)] struct ListAndWatchSvc(pub Arc); impl tonic::server::ServerStreamingService for ListAndWatchSvc { type Response = super::ListAndWatchResponse; @@ -523,27 +596,28 @@ pub mod device_plugin_server { BoxFuture, tonic::Status>; fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.list_and_watch(request).await }; + let fut = async move { (*inner).list_and_watch(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1; let inner = inner.0; let method = ListAndWatchSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = 
grpc.server_streaming(method, req).await; Ok(res) }; Box::pin(fut) } "/v1beta1.DevicePlugin/Allocate" => { + #[allow(non_camel_case_types)] struct AllocateSvc(pub Arc); impl tonic::server::UnaryService for AllocateSvc { type Response = super::AllocateResponse; @@ -553,27 +627,28 @@ pub mod device_plugin_server { request: tonic::Request, ) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.allocate(request).await }; + let fut = async move { (*inner).allocate(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1.clone(); let inner = inner.0; let method = AllocateSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/v1beta1.DevicePlugin/PreStartContainer" => { + #[allow(non_camel_case_types)] struct PreStartContainerSvc(pub Arc); impl tonic::server::UnaryService @@ -586,21 +661,21 @@ pub mod device_plugin_server { request: tonic::Request, ) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.pre_start_container(request).await }; + let fut = async move { (*inner).pre_start_container(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1.clone(); let inner = inner.0; let method = PreStartContainerSvc(inner); let codec = 
tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -610,7 +685,8 @@ pub mod device_plugin_server { Ok(http::Response::builder() .status(200) .header("grpc-status", "12") - .body(tonic::body::BoxBody::empty()) + .header("content-type", "application/grpc") + .body(empty_body()) .unwrap()) }), } @@ -619,12 +695,16 @@ pub mod device_plugin_server { impl Clone for DevicePluginServer { fn clone(&self) -> Self { let inner = self.inner.clone(); - Self { inner } + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone(), self.1.clone()) + Self(self.0.clone()) } } impl std::fmt::Debug for _Inner { diff --git a/build/setup.sh b/build/setup.sh index 662633deb..ef1ab63bd 100755 --- a/build/setup.sh +++ b/build/setup.sh @@ -23,10 +23,10 @@ then if [ -x "$(command -v sudo)" ]; then echo "Install rustup" - sudo curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.51.0 + sudo curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.54.0 else echo "Install rustup" - curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.51.0 + curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.54.0 fi else echo "Found rustup" diff --git a/controller/Cargo.toml b/controller/Cargo.toml index 988d0d33b..656393e39 100644 --- a/controller/Cargo.toml +++ b/controller/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "controller" -version = "0.6.12" +version = "0.6.13" authors = [""] edition = "2018" @@ -11,11 +11,12 @@ akri-shared = { path = 
"../shared" } async-std = "1.5.0" async-trait = "0.1.0" chrono = "0.4.10" -env_logger = "0.8.3" +env_logger = "0.9.0" futures = "0.3.1" anyhow = "1.0.38" -kube = { version = "0.23.0", features = ["openapi"] } -k8s-openapi = { version = "0.6.0", features = ["v1_16"] } +kube = { version = "0.59.0", features = ["derive"] } +kube-runtime = "0.59.0" +k8s-openapi = { version = "0.13.0", default-features = false, features = ["schemars", "v1_16"] } lazy_static = "1.4" log = "0.4" prometheus = { version = "0.12.0", features = ["process"] } @@ -23,7 +24,7 @@ serde = "1.0.104" serde_derive = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" -tokio = { version = "0.2", features = ["full"] } +tokio = { version = "1.0.2", features = ["full"] } [dev-dependencies] -mockall = "0.9.0" \ No newline at end of file +mockall = "0.10.2" diff --git a/controller/src/util/instance_action.rs b/controller/src/util/instance_action.rs index 4f46b276b..0d6252d59 100644 --- a/controller/src/util/instance_action.rs +++ b/controller/src/util/instance_action.rs @@ -1,10 +1,7 @@ use super::super::BROKER_POD_COUNT_METRIC; use super::{pod_action::PodAction, pod_action::PodActionInfo}; use akri_shared::{ - akri::{ - configuration::KubeAkriConfig, instance::KubeAkriInstance, AKRI_PREFIX, API_INSTANCES, - API_NAMESPACE, API_VERSION, - }, + akri::{configuration::Configuration, instance::Instance, AKRI_PREFIX}, k8s, k8s::{ pod, @@ -13,9 +10,9 @@ use akri_shared::{ }, }; use async_std::sync::Mutex; -use futures::StreamExt; -use k8s_openapi::api::core::v1::{PodSpec, PodStatus}; -use kube::api::{Informer, Object, RawApi, WatchEvent}; +use futures::{StreamExt, TryStreamExt}; +use k8s_openapi::api::core::v1::Pod; +use kube::api::{Api, ListParams, WatchEvent}; use log::{error, info, trace}; use std::collections::HashMap; use std::sync::Arc; @@ -43,7 +40,7 @@ pub enum InstanceAction { /// This invokes an internal method that watches for Instance events pub async fn handle_existing_instances( ) -> Result<(), 
Box> { - internal_handle_existing_instances(&k8s::create_kube_interface()).await + internal_handle_existing_instances(&k8s::KubeImpl::new().await?).await } /// This invokes an internal method that watches for Instance events @@ -51,7 +48,7 @@ pub async fn do_instance_watch( synchronization: Arc>, ) -> Result<(), Box> { // Watch for instance changes - internal_do_instance_watch(&synchronization, &k8s::create_kube_interface()).await + internal_do_instance_watch(&synchronization, &k8s::KubeImpl::new().await?).await } /// This invokes an internal method that watches for Instance events @@ -64,7 +61,7 @@ async fn internal_handle_existing_instances( let pre_existing_instances = kube_interface.get_instances().await?; for instance in pre_existing_instances { tasks.push(tokio::spawn(async move { - let inner_kube_interface = k8s::create_kube_interface(); + let inner_kube_interface = k8s::KubeImpl::new().await.unwrap(); handle_instance_change(&instance, &InstanceAction::Update, &inner_kube_interface) .await .unwrap(); @@ -80,40 +77,35 @@ async fn internal_do_instance_watch( kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("internal_do_instance_watch - enter"); - let akri_instance_type = RawApi::customResource(API_INSTANCES) - .group(API_NAMESPACE) - .version(API_VERSION); - - let informer = Informer::raw(kube_interface.get_kube_client(), akri_instance_type) - .init() - .await?; - loop { - let mut instances = informer.poll().await?.boxed(); - - // Currently, this does not handle None except to break the - // while. - while let Some(event) = instances.next().await { - // Aquire lock to ensure cleanup_instance_and_configuration_svcs and the - // inner loop handle_instance call in internal_do_instance_watch - // cannot execute at the same time. 
- let _lock = synchronization.lock().await; - trace!("internal_do_instance_watch - aquired sync lock"); - handle_instance(event?, kube_interface).await?; - } + let resource = Api::::all(kube_interface.get_kube_client()); + let mut stream = resource + .watch(&ListParams::default(), akri_shared::akri::WATCH_VERSION) + .await? + .boxed(); + // Currently, this does not handle None except to break the + // while. + while let Some(event) = stream.try_next().await? { + // Aquire lock to ensure cleanup_instance_and_configuration_svcs and the + // inner loop handle_instance call in internal_do_instance_watch + // cannot execute at the same time. + let _lock = synchronization.lock().await; + trace!("internal_do_instance_watch - aquired sync lock"); + handle_instance(event, kube_interface).await?; } + Ok(()) } /// This takes an event off the Instance stream and delegates it to the /// correct function based on the event type. async fn handle_instance( - event: WatchEvent, + event: WatchEvent, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_instance - enter"); match event { WatchEvent::Added(instance) => { info!( - "handle_instance - added Akri Instance {}: {:?}", + "handle_instance - added Akri Instance {:?}: {:?}", instance.metadata.name, instance.spec ); handle_instance_change(&instance, &InstanceAction::Add, kube_interface).await?; @@ -121,7 +113,7 @@ async fn handle_instance( } WatchEvent::Deleted(instance) => { info!( - "handle_instance - deleted Akri Instance {}: {:?}", + "handle_instance - deleted Akri Instance {:?}: {:?}", instance.metadata.name, instance.spec ); handle_instance_change(&instance, &InstanceAction::Remove, kube_interface).await?; @@ -129,7 +121,7 @@ async fn handle_instance( } WatchEvent::Modified(instance) => { info!( - "handle_instance - modified Akri Instance {}: {:?}", + "handle_instance - modified Akri Instance {:?}: {:?}", instance.metadata.name, instance.spec ); handle_instance_change(&instance, &InstanceAction::Update, 
kube_interface).await?; @@ -139,6 +131,7 @@ async fn handle_instance( trace!("handle_instance - error for Akri Instance: {}", e); Ok(()) } + WatchEvent::Bookmark(_) => Ok(()), } } @@ -162,13 +155,13 @@ struct PodContext { /// the Instance event action. If this method has enough information, /// it will update the nodes_to_act_on map with the required action. fn determine_action_for_pod( - k8s_pod: &Object, + k8s_pod: &Pod, action: &InstanceAction, nodes_to_act_on: &mut HashMap, ) { if k8s_pod.status.is_none() { error!( - "determine_action_for_pod - no pod status found for {}", + "determine_action_for_pod - no pod status found for {:?}", &k8s_pod.metadata.name ); return; @@ -176,7 +169,7 @@ fn determine_action_for_pod( if k8s_pod.status.as_ref().unwrap().phase.is_none() { error!( - "determine_action_for_pod - no pod phase found for {}", + "determine_action_for_pod - no pod phase found for {:?}", &k8s_pod.metadata.name ); return; @@ -184,35 +177,31 @@ fn determine_action_for_pod( // Early exits above ensure unwrap will not panic let pod_phase = k8s_pod.status.as_ref().unwrap().phase.as_ref().unwrap(); + let labels = match &k8s_pod.metadata.labels { + Some(labels) => labels, + None => { + error!( + "determine_action_for_pod - no {} label found for {:?}", + AKRI_TARGET_NODE_LABEL_NAME, &k8s_pod.metadata.name + ); + return; + } + }; - if k8s_pod - .metadata - .labels - .get(AKRI_TARGET_NODE_LABEL_NAME) - .is_none() - { + if labels.get(AKRI_TARGET_NODE_LABEL_NAME).is_none() { error!( - "determine_action_for_pod - no {} label found for {}", + "determine_action_for_pod - no {} label found for {:?}", AKRI_TARGET_NODE_LABEL_NAME, &k8s_pod.metadata.name ); return; } // Early exits above ensure unwrap will not panic - let node_to_run_pod_on = k8s_pod - .metadata - .labels - .get(AKRI_TARGET_NODE_LABEL_NAME) - .unwrap(); + let node_to_run_pod_on = labels.get(AKRI_TARGET_NODE_LABEL_NAME).unwrap(); - if k8s_pod - .metadata - .labels - .get(AKRI_INSTANCE_LABEL_NAME) - .is_none() - 
{ + if labels.get(AKRI_INSTANCE_LABEL_NAME).is_none() { error!( - "determine_action_for_pod - no {} label found for {}", + "determine_action_for_pod - no {} label found for {:?}", AKRI_INSTANCE_LABEL_NAME, &k8s_pod.metadata.name ); return; @@ -234,7 +223,7 @@ fn determine_action_for_pod( instance_action: action.clone(), status_start_time: pod_start_time, unknown_node: !nodes_to_act_on.contains_key(node_to_run_pod_on), - trace_node_name: k8s_pod.metadata.name.clone(), + trace_node_name: k8s_pod.metadata.name.clone().unwrap(), }; update_pod_context.action = match pod_action_info.select_pod_action() { Ok(action) => action, @@ -277,7 +266,7 @@ async fn handle_deletion_work( &"pod".to_string() ); let pod_app_name = pod::create_pod_app_name( - &instance_name, + instance_name, context_node_name, instance_shared, &"pod".to_string(), @@ -288,7 +277,7 @@ async fn handle_deletion_work( &context_namespace ); kube_interface - .remove_pod(&pod_app_name, &context_namespace) + .remove_pod(&pod_app_name, context_namespace) .await?; trace!("handle_deletion_work - pod::remove_pod succeeded",); BROKER_POD_COUNT_METRIC @@ -356,7 +345,7 @@ async fn handle_addition_work( instance_class_name: &str, instance_shared: bool, new_node: &str, - instance_configuration: &KubeAkriConfig, + instance_configuration: &Configuration, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!( @@ -367,9 +356,9 @@ async fn handle_addition_work( if let Some(broker_pod_spec) = &instance_configuration.spec.broker_pod_spec { let capability_id = format!("{}/{}", AKRI_PREFIX, instance_name); let new_pod = pod::create_new_pod_from_spec( - &instance_namespace, - &instance_name, - &instance_class_name, + instance_namespace, + instance_name, + instance_class_name, OwnershipInfo::new( OwnershipType::Instance, instance_name.to_string(), @@ -378,13 +367,13 @@ async fn handle_addition_work( &capability_id, &new_node.to_string(), instance_shared, - &broker_pod_spec, + broker_pod_spec, )?; 
trace!("handle_addition_work - New pod spec={:?}", new_pod); kube_interface - .create_pod(&new_pod, &instance_namespace) + .create_pod(&new_pod, instance_namespace) .await?; trace!("handle_addition_work - pod::create_pod succeeded",); BROKER_POD_COUNT_METRIC @@ -399,13 +388,13 @@ async fn handle_addition_work( /// disappearances, starting broker Pods/Services that are missing, /// and stopping Pods/Services that are no longer needed. pub async fn handle_instance_change( - instance: &KubeAkriInstance, + instance: &Instance, action: &InstanceAction, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_instance_change - enter {:?}", action); - let instance_name = instance.metadata.name.clone(); + let instance_name = instance.metadata.name.clone().unwrap(); let instance_namespace = instance.metadata.namespace.as_ref().ok_or(format!( "Namespace not found for instance: {}", &instance_name @@ -502,7 +491,7 @@ pub async fn handle_instance_change( &instance.spec.configuration_name ); let instance_configuration = match kube_interface - .find_configuration(&instance.spec.configuration_name, &instance_namespace) + .find_configuration(&instance.spec.configuration_name, instance_namespace) .await { Ok(config) => config, @@ -511,7 +500,7 @@ pub async fn handle_instance_change( // Furthermore, Akri Agent is still modifying the Instances. This should not happen beacuse Agent // is designed to shutdown when it's Configuration watcher fails. 
error!( - "handle_instance_change - no configuration found for {} yet instance {} exists - check that device plugin is running propertly", + "handle_instance_change - no configuration found for {:?} yet instance {:?} exists - check that device plugin is running propertly", &instance.spec.configuration_name, &instance.metadata.name ); return Ok(()); @@ -530,12 +519,12 @@ pub async fn handle_instance_change( for new_node in nodes_to_add { handle_addition_work( &instance_name, - &instance_uid, - &instance_namespace, + instance_uid, + instance_namespace, &instance.spec.configuration_name, instance.spec.shared, &new_node, - &instance_configuration_option.as_ref().unwrap(), + instance_configuration_option.as_ref().unwrap(), kube_interface, ) .await?; @@ -552,7 +541,7 @@ mod handle_instance_tests { use super::super::shared_test_utils::config_for_tests::PodList; use super::*; use akri_shared::{ - akri::instance::KubeAkriInstance, + akri::instance::Instance, k8s::{pod::AKRI_INSTANCE_LABEL_NAME, MockKubeInterface}, os::file, }; @@ -789,7 +778,7 @@ mod handle_instance_tests { ) { trace!("run_handle_instance_change_test enter"); let instance_json = file::read_file_to_string(instance_file); - let instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let instance: Instance = serde_json::from_str(&instance_json).unwrap(); handle_instance( match action { InstanceAction::Add => WatchEvent::Added(instance), @@ -948,7 +937,7 @@ mod handle_instance_tests { let deleted_node = "node-b"; let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); - let kube_object_instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let kube_object_instance: Instance = serde_json::from_str(&instance_json).unwrap(); let mut instance = kube_object_instance.spec; instance.nodes = instance .nodes @@ -988,7 +977,7 @@ mod handle_instance_tests { )), }, ); - run_handle_instance_change_test(&mut 
mock, &instance_file, &InstanceAction::Update).await; + run_handle_instance_change_test(&mut mock, instance_file, &InstanceAction::Update).await; } /// Checks that the BROKER_POD_COUNT_METRIC is appropriately incremented diff --git a/controller/src/util/node_watcher.rs b/controller/src/util/node_watcher.rs index 889afd99e..5bd100734 100644 --- a/controller/src/util/node_watcher.rs +++ b/controller/src/util/node_watcher.rs @@ -1,19 +1,17 @@ use akri_shared::{ akri::{ - instance::{Instance, KubeAkriInstance}, + instance::{Instance, InstanceSpec}, retry::{random_delay, MAX_INSTANCE_UPDATE_TRIES}, }, k8s, k8s::KubeInterface, }; -use futures::StreamExt; -use k8s_openapi::api::core::v1::{NodeSpec, NodeStatus}; -use kube::api::{Api, Informer, Object, WatchEvent}; +use futures::{StreamExt, TryStreamExt}; +use k8s_openapi::api::core::v1::{Node, NodeStatus}; +use kube::api::{Api, ListParams, WatchEvent}; use log::trace; use std::collections::HashMap; -type NodeObject = Object; - /// Node states that NodeWatcher is interested in /// /// NodeState describes the various states that the controller can @@ -54,19 +52,19 @@ impl NodeWatcher { &mut self, ) -> Result<(), Box> { trace!("watch - enter"); - let kube_interface = k8s::create_kube_interface(); - let resource = Api::v1Node(kube_interface.get_kube_client()); - let inf = Informer::new(resource.clone()).init().await?; - - loop { - let mut nodes = inf.poll().await?.boxed(); - - // Currently, this does not handle None except to break the - // while. - while let Some(event) = nodes.next().await { - self.handle_node(event?, &kube_interface).await?; - } + let kube_interface = k8s::KubeImpl::new().await?; + let resource = Api::::all(kube_interface.get_kube_client()); + let mut stream = resource + .watch(&ListParams::default(), akri_shared::akri::WATCH_VERSION) + .await? + .boxed(); + // Currently, this does not handle None except to break the + // while. + while let Some(event) = stream.try_next().await? 
{ + self.handle_node(event, &kube_interface).await?; } + + Ok(()) } /// This takes an event off the Node stream and if a Node is no longer @@ -89,7 +87,7 @@ impl NodeWatcher { /// non-Running Node. async fn handle_node( &mut self, - event: WatchEvent, + event: WatchEvent, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_node - enter"); @@ -98,10 +96,10 @@ impl NodeWatcher { trace!("handle_node - Added: {:?}", &node.metadata.name); if self.is_node_ready(&node) { self.known_nodes - .insert(node.metadata.name, NodeState::Running); + .insert(node.metadata.name.unwrap(), NodeState::Running); } else { self.known_nodes - .insert(node.metadata.name, NodeState::Known); + .insert(node.metadata.name.unwrap(), NodeState::Known); } } WatchEvent::Modified(node) => { @@ -113,7 +111,7 @@ impl NodeWatcher { ); if self.is_node_ready(&node) { self.known_nodes - .insert(node.metadata.name.clone(), NodeState::Running); + .insert(node.metadata.name.unwrap(), NodeState::Running); } else { self.call_handle_node_disappearance_if_needed(&node, kube_interface) .await?; @@ -127,6 +125,7 @@ impl NodeWatcher { WatchEvent::Error(e) => { trace!("handle_node - error for Node: {}", e); } + WatchEvent::Bookmark(_) => {} }; Ok(()) } @@ -136,16 +135,17 @@ impl NodeWatcher { /// only once for any Node as it disappears. 
async fn call_handle_node_disappearance_if_needed( &mut self, - node: &NodeObject, + node: &Node, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { + let node_name = node.metadata.name.clone().unwrap(); trace!( "call_handle_node_disappearance_if_needed - enter: {:?}", &node.metadata.name ); let last_known_state = self .known_nodes - .get(&node.metadata.name) + .get(&node_name) .unwrap_or(&NodeState::Running); trace!( "call_handle_node_disappearance_if_needed - last_known_state: {:?}", @@ -161,16 +161,16 @@ impl NodeWatcher { "call_handle_node_disappearance_if_needed - call handle_node_disappearance: {:?}", &node.metadata.name ); - self.handle_node_disappearance(&node.metadata.name, kube_interface) + self.handle_node_disappearance(&node_name, kube_interface) .await?; self.known_nodes - .insert(node.metadata.name.clone(), NodeState::InstancesCleaned); + .insert(node_name, NodeState::InstancesCleaned); } Ok(()) } /// This determines if a node is in the Ready state. - fn is_node_ready(&self, k8s_node: &NodeObject) -> bool { + fn is_node_ready(&self, k8s_node: &Node) -> bool { trace!("is_node_ready - for node {:?}", k8s_node.metadata.name); k8s_node .status @@ -200,7 +200,7 @@ impl NodeWatcher { &self, vanished_node_name: &str, kube_interface: &impl KubeInterface, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { trace!( "handle_node_disappearance - enter vanished_node_name={:?}", vanished_node_name, @@ -212,11 +212,10 @@ impl NodeWatcher { instances.items.len() ); for instance in instances.items { - let instance_name = instance.metadata.name.clone(); - let instance_namespace = instance.metadata.namespace.as_ref().ok_or(format!( - "Namespace not found for instance: {}", - instance_name - ))?; + let instance_name = instance.metadata.name.clone().unwrap(); + let instance_namespace = instance.metadata.namespace.as_ref().ok_or_else(|| { + anyhow::anyhow!("Namespace not found for instance: {}", instance_name) + })?; trace!( "handle_node_disappearance - 
make sure node is not referenced here: {:?}", @@ -227,21 +226,21 @@ impl NodeWatcher { for x in 0..MAX_INSTANCE_UPDATE_TRIES { match if x == 0 { self.try_remove_nodes_from_instance( - &vanished_node_name, + vanished_node_name, &instance_name, - &instance_namespace, + instance_namespace, &instance, kube_interface, ) .await } else { let retry_instance = kube_interface - .find_instance(&instance_name, &instance_namespace) + .find_instance(&instance_name, instance_namespace) .await?; self.try_remove_nodes_from_instance( - &vanished_node_name, + vanished_node_name, &instance_name, - &instance_namespace, + instance_namespace, &retry_instance, kube_interface, ) @@ -270,9 +269,9 @@ impl NodeWatcher { vanished_node_name: &str, instance_name: &str, instance_namespace: &str, - instance: &KubeAkriInstance, + instance: &Instance, kube_interface: &impl KubeInterface, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { trace!( "try_remove_nodes_from_instance - vanished_node_name: {:?}", &vanished_node_name @@ -302,7 +301,7 @@ impl NodeWatcher { .collect::>(); // Save the instance - let modified_instance = Instance { + let modified_instance = InstanceSpec { configuration_name: instance.spec.configuration_name.clone(), broker_properties: instance.spec.broker_properties.clone(), shared: instance.spec.shared, @@ -318,7 +317,7 @@ impl NodeWatcher { ); kube_interface - .update_instance(&modified_instance, &instance_name, &instance_namespace) + .update_instance(&modified_instance, instance_name, instance_namespace) .await } } @@ -327,11 +326,11 @@ impl NodeWatcher { mod tests { use super::super::shared_test_utils::config_for_tests; use super::*; - use akri_shared::{akri::instance::KubeAkriInstanceList, k8s::MockKubeInterface, os::file}; + use akri_shared::{akri::instance::InstanceList, k8s::MockKubeInterface, os::file}; #[derive(Clone)] struct UpdateInstance { - instance_to_update: Instance, + instance_to_update: InstanceSpec, instance_name: &'static str, instance_namespace: 
&'static str, } @@ -368,7 +367,7 @@ mod tests { async fn test_handle_node_added_unready() { let _ = env_logger::builder().is_test(true).try_init(); let node_json = file::read_file_to_string("../test/json/node-a-not-ready.json"); - let node: NodeObject = serde_json::from_str(&node_json).unwrap(); + let node: Node = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); node_watcher .handle_node(WatchEvent::Added(node), &MockKubeInterface::new()) @@ -388,7 +387,7 @@ mod tests { let _ = env_logger::builder().is_test(true).try_init(); let node_json = file::read_file_to_string("../test/json/node-a.json"); - let node: NodeObject = serde_json::from_str(&node_json).unwrap(); + let node: Node = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); node_watcher .handle_node(WatchEvent::Added(node), &MockKubeInterface::new()) @@ -408,12 +407,11 @@ mod tests { let _ = env_logger::builder().is_test(true).try_init(); let node_json = file::read_file_to_string("../test/json/node-b-not-ready.json"); - let node: NodeObject = serde_json::from_str(&node_json).unwrap(); + let node: Node = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); - let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); - let kube_object_instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let kube_object_instance: Instance = serde_json::from_str(&instance_json).unwrap(); let mut instance = kube_object_instance.spec; instance.nodes.clear(); instance @@ -452,7 +450,7 @@ mod tests { let _ = env_logger::builder().is_test(true).try_init(); let node_json = file::read_file_to_string("../test/json/node-b.json"); - let node: NodeObject = serde_json::from_str(&node_json).unwrap(); + let node: Node = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); let mock = MockKubeInterface::new(); @@ 
-474,12 +472,12 @@ mod tests { let _ = env_logger::builder().is_test(true).try_init(); let node_json = file::read_file_to_string("../test/json/node-b-not-ready.json"); - let node: NodeObject = serde_json::from_str(&node_json).unwrap(); + let node: Node = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); - let kube_object_instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let kube_object_instance: Instance = serde_json::from_str(&instance_json).unwrap(); let mut instance = kube_object_instance.spec; instance.nodes.clear(); instance @@ -538,26 +536,26 @@ mod tests { let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); let instance_list_json = listify_node(&instance_json); - let list: KubeAkriInstanceList = serde_json::from_str(&instance_list_json).unwrap(); + let list: InstanceList = serde_json::from_str(&instance_list_json).unwrap(); Ok(list) }); mock.expect_update_instance() .times(MAX_INSTANCE_UPDATE_TRIES as usize) .withf(move |_instance, n, ns| n == "config-a-359973" && ns == "config-a-namespace") - .returning(move |_, _, _| Err(None.ok_or("failure")?)); + .returning(move |_, _, _| Err(None.ok_or(anyhow::anyhow!("failure"))?)); mock.expect_find_instance() .times((MAX_INSTANCE_UPDATE_TRIES - 1) as usize) .withf(move |n, ns| n == "config-a-359973" && ns == "config-a-namespace") .returning(move |_, _| { let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); - let instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let instance: Instance = serde_json::from_str(&instance_json).unwrap(); Ok(instance) }); let node_watcher = NodeWatcher::new(); assert!(node_watcher - .handle_node_disappearance(&"foo-a", &mock,) + 
.handle_node_disappearance("foo-a", &mock) .await .is_err()); } @@ -568,7 +566,7 @@ mod tests { let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); - let kube_object_instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); + let kube_object_instance: Instance = serde_json::from_str(&instance_json).unwrap(); let mut mock = MockKubeInterface::new(); mock.expect_update_instance() @@ -625,8 +623,7 @@ mod tests { ); let node_json = file::read_file_to_string(node_file); - let kube_object_node: Object = - serde_json::from_str(&node_json).unwrap(); + let kube_object_node: Node = serde_json::from_str(&node_json).unwrap(); let node_watcher = NodeWatcher::new(); assert_eq!( diff --git a/controller/src/util/pod_watcher.rs b/controller/src/util/pod_watcher.rs index 630fc3ef0..2f46769b4 100644 --- a/controller/src/util/pod_watcher.rs +++ b/controller/src/util/pod_watcher.rs @@ -1,6 +1,6 @@ use akri_shared::{ akri::{ - configuration::KubeAkriConfig, + configuration::Configuration, retry::{random_delay, MAX_INSTANCE_UPDATE_TRIES}, }, k8s, @@ -12,14 +12,13 @@ use akri_shared::{ }, }; use async_std::sync::Mutex; -use futures::StreamExt; -use k8s_openapi::api::core::v1::{PodSpec, PodStatus, ServiceSpec}; -use kube::api::{Api, Informer, Object, WatchEvent}; +use futures::{StreamExt, TryStreamExt}; +use k8s_openapi::api::core::v1::{Pod, ServiceSpec}; +use kube::api::{Api, ListParams, WatchEvent}; use log::trace; use std::{collections::HashMap, sync::Arc}; -type PodObject = Object; -type PodSlice = [PodObject]; +type PodSlice = [Pod]; /// Pod states that BrokerPodWatcher is interested in /// @@ -80,28 +79,27 @@ impl BrokerPodWatcher { &mut self, ) -> Result<(), Box> { trace!("watch - enter"); - let kube_interface = k8s::create_kube_interface(); - let resource = Api::v1Pod(kube_interface.get_kube_client()); - let informer = Informer::new(resource.clone()) - .labels(AKRI_TARGET_NODE_LABEL_NAME) - 
.init() - .await?; + let kube_interface = k8s::KubeImpl::new().await?; + let resource = Api::::all(kube_interface.get_kube_client()); + let mut stream = resource + .watch( + &ListParams::default().labels(AKRI_TARGET_NODE_LABEL_NAME), + akri_shared::akri::WATCH_VERSION, + ) + .await? + .boxed(); let synchronization = Arc::new(Mutex::new(())); - - loop { - let mut pods = informer.poll().await?.boxed(); - - // Currently, this does not handle None except to break the - // while. - while let Some(event) = pods.next().await { - let _lock = synchronization.lock().await; - self.handle_pod(event?, &kube_interface).await?; - } + // Currently, this does not handle None except to break the + // while. + while let Some(event) = stream.try_next().await? { + let _lock = synchronization.lock().await; + self.handle_pod(event, &kube_interface).await?; } + Ok(()) } /// Gets Pods phase and returns "Unknown" if no phase exists - fn get_pod_phase(&mut self, pod: &PodObject) -> String { + fn get_pod_phase(&mut self, pod: &Pod) -> String { if pod.status.is_some() { pod.status .as_ref() @@ -120,19 +118,19 @@ impl BrokerPodWatcher { /// ensure that the instance and configuration services are removed as needed. 
async fn handle_pod( &mut self, - event: WatchEvent, + event: WatchEvent, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_pod - enter [event: {:?}]", event); match event { WatchEvent::Added(pod) | WatchEvent::Modified(pod) => { - trace!("handle_pod - pod name {:?}", &pod.metadata.name); + trace!("handle_pod - pod name {:?}", pod.metadata.name); let phase = self.get_pod_phase(&pod); - trace!("handle_pod - pod phase {:?}", &phase); + trace!("handle_pod - pod phase {:?}", phase); match phase.as_str() { "Unknown" | "Pending" => { self.known_pods - .insert(pod.metadata.name.clone(), PodState::Pending); + .insert(pod.metadata.name.unwrap(), PodState::Pending); } "Running" => { self.handle_running_pod_if_needed(&pod, kube_interface) @@ -143,18 +141,19 @@ impl BrokerPodWatcher { .await?; } _ => { - trace!("handle_pod - Unknown phase: {:?}", &phase); + trace!("handle_pod - Unknown phase: {:?}", phase); } } } WatchEvent::Deleted(pod) => { - trace!("handle_pod - Deleted: {:?}", &pod.metadata.name); + trace!("handle_pod - Deleted: {:?}", pod.metadata.name); self.handle_deleted_pod_if_needed(&pod, kube_interface) .await?; } WatchEvent::Error(err) => { trace!("handle_pod - error for Pod: {}", err); } + WatchEvent::Bookmark(_) => {} }; Ok(()) } @@ -163,11 +162,15 @@ impl BrokerPodWatcher { /// any Pod as it exits the Running phase. 
async fn handle_running_pod_if_needed( &mut self, - pod: &PodObject, + pod: &Pod, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_running_pod_if_needed - enter"); - let pod_name = pod.metadata.name.clone(); + let pod_name = pod + .metadata + .name + .clone() + .ok_or_else(|| anyhow::format_err!("Pod {:?} does not have name", pod))?; let last_known_state = self.known_pods.get(&pod_name).unwrap_or(&PodState::Pending); trace!( "handle_running_pod_if_needed - last_known_state: {:?}", @@ -177,7 +180,7 @@ impl BrokerPodWatcher { // per transition into the Running state if last_known_state != &PodState::Running { trace!("handle_running_pod_if_needed - call handle_running_pod"); - self.handle_running_pod(&pod, kube_interface).await?; + self.handle_running_pod(pod, kube_interface).await?; self.known_pods.insert(pod_name, PodState::Running); } Ok(()) @@ -189,11 +192,15 @@ impl BrokerPodWatcher { /// expected and accepted. async fn handle_ended_pod_if_needed( &mut self, - pod: &PodObject, + pod: &Pod, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_ended_pod_if_needed - enter"); - let pod_name = pod.metadata.name.clone(); + let pod_name = pod + .metadata + .name + .clone() + .ok_or_else(|| anyhow::format_err!("Pod {:?} does not have name", pod))?; let last_known_state = self.known_pods.get(&pod_name).unwrap_or(&PodState::Pending); trace!( "handle_ended_pod_if_needed - last_known_state: {:?}", @@ -203,7 +210,7 @@ impl BrokerPodWatcher { // per transition into the Ended state if last_known_state != &PodState::Ended { trace!("handle_ended_pod_if_needed - call handle_non_running_pod"); - self.handle_non_running_pod(&pod, kube_interface).await?; + self.handle_non_running_pod(pod, kube_interface).await?; self.known_pods.insert(pod_name, PodState::Ended); } Ok(()) @@ -215,11 +222,15 @@ impl BrokerPodWatcher { /// expected and accepted. 
async fn handle_deleted_pod_if_needed( &mut self, - pod: &PodObject, + pod: &Pod, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_deleted_pod_if_needed - enter"); - let pod_name = pod.metadata.name.clone(); + let pod_name = pod + .metadata + .name + .clone() + .ok_or_else(|| anyhow::format_err!("Pod {:?} does not have name", pod))?; let last_known_state = self.known_pods.get(&pod_name).unwrap_or(&PodState::Pending); trace!( "handle_deleted_pod_if_needed - last_known_state: {:?}", @@ -229,7 +240,7 @@ impl BrokerPodWatcher { // per transition into the Deleted state if last_known_state != &PodState::Deleted { trace!("handle_deleted_pod_if_needed - call handle_non_running_pod"); - self.handle_non_running_pod(&pod, kube_interface).await?; + self.handle_non_running_pod(pod, kube_interface).await?; self.known_pods.insert(pod_name, PodState::Deleted); } Ok(()) @@ -239,17 +250,18 @@ impl BrokerPodWatcher { /// error if the annotations are not found. fn get_instance_and_configuration_from_pod( &self, - pod: &PodObject, + pod: &Pod, ) -> Result<(String, String), Box> { trace!("get_instance_and_configuration_from_pod - enter"); - let instance_id = pod + let labels = pod .metadata .labels + .as_ref() + .ok_or("Pod doesn't have labels")?; + let instance_id = labels .get(AKRI_INSTANCE_LABEL_NAME) .ok_or("No configuration name found.")?; - let config_name = pod - .metadata - .labels + let config_name = labels .get(AKRI_CONFIGURATION_LABEL_NAME) .ok_or("No instance id found.")?; Ok((instance_id.to_string(), config_name.to_string())) @@ -260,19 +272,19 @@ impl BrokerPodWatcher { /// supported by Running broker Pods. 
async fn handle_non_running_pod( &self, - pod: &PodObject, + pod: &Pod, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_non_running_pod - enter"); let namespace = pod.metadata.namespace.as_ref().ok_or(format!( - "Namespace not found for pod: {}", + "Namespace not found for pod: {:?}", &pod.metadata.name ))?; let (instance_id, config_name) = self.get_instance_and_configuration_from_pod(pod)?; self.find_pods_and_cleanup_svc_if_unsupported( &instance_id, &config_name, - &namespace, + namespace, true, kube_interface, ) @@ -280,14 +292,14 @@ impl BrokerPodWatcher { self.find_pods_and_cleanup_svc_if_unsupported( &instance_id, &config_name, - &namespace, + namespace, false, kube_interface, ) .await?; // Make sure instance has required Pods - if let Ok(instance) = kube_interface.find_instance(&instance_id, &namespace).await { + if let Ok(instance) = kube_interface.find_instance(&instance_id, namespace).await { super::instance_action::handle_instance_change( &instance, &super::instance_action::InstanceAction::Update, @@ -330,8 +342,8 @@ impl BrokerPodWatcher { ); let svc_name = service::create_service_app_name( - &configuration_name, - &instance_id, + configuration_name, + instance_id, &"svc".to_string(), handle_instance_svc, ); @@ -374,7 +386,7 @@ impl BrokerPodWatcher { &svc_name, &svc_namespace ); kube_interface - .remove_service(&svc_name, &svc_namespace) + .remove_service(svc_name, svc_namespace) .await?; trace!("cleanup_svc_if_unsupported - service::remove_service succeeded"); } @@ -386,18 +398,18 @@ impl BrokerPodWatcher { /// by the configuration. 
async fn handle_running_pod( &self, - pod: &PodObject, + pod: &Pod, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!("handle_running_pod - enter"); let namespace = pod.metadata.namespace.as_ref().ok_or(format!( - "Namespace not found for pod: {}", + "Namespace not found for pod: {:?}", &pod.metadata.name ))?; let (instance_name, configuration_name) = self.get_instance_and_configuration_from_pod(pod)?; let configuration = match kube_interface - .find_configuration(&configuration_name, &namespace) + .find_configuration(&configuration_name, namespace) .await { Ok(config) => config, @@ -412,7 +424,7 @@ impl BrokerPodWatcher { } }; let instance = match kube_interface - .find_instance(&instance_name, &namespace) + .find_instance(&instance_name, namespace) .await { Ok(instance) => instance, @@ -433,8 +445,8 @@ impl BrokerPodWatcher { .ok_or(format!("UID not found for instance: {}", instance_name))?; self.add_instance_and_configuration_services( &instance_name, - &instance_uid, - &namespace, + instance_uid, + namespace, &configuration_name, &configuration, kube_interface, @@ -470,7 +482,9 @@ impl BrokerPodWatcher { { for existing_svc in existing_svcs { let mut existing_svc = existing_svc.clone(); - let svc_name = existing_svc.metadata.name.clone(); + let svc_name = existing_svc.metadata.name.clone().ok_or_else(|| { + anyhow::format_err!("Service {:?} does not have name", existing_svc) + })?; let svc_namespace = existing_svc.metadata.namespace.as_ref().unwrap().clone(); trace!( "create_or_update_service - Update existing svc={:?}", @@ -488,9 +502,9 @@ impl BrokerPodWatcher { if create_new_service { let new_instance_svc = service::create_new_service_from_spec( - &namespace, - &instance_name, - &configuration_name, + namespace, + instance_name, + configuration_name, ownership.clone(), service_spec, is_instance_service, @@ -501,7 +515,7 @@ impl BrokerPodWatcher { ); kube_interface - .create_service(&new_instance_svc, &namespace) + 
.create_service(&new_instance_svc, namespace) .await?; trace!("create_or_update_service - service::create_service succeeded"); } @@ -515,7 +529,7 @@ impl BrokerPodWatcher { instance_uid: &str, namespace: &str, configuration_name: &str, - configuration: &KubeAkriConfig, + configuration: &Configuration, kube_interface: &impl KubeInterface, ) -> Result<(), Box> { trace!( @@ -602,7 +616,6 @@ mod tests { use super::super::shared_test_utils::config_for_tests::PodList; use super::*; use akri_shared::{k8s::MockKubeInterface, os::file}; - use kube::ErrorResponse; fn create_pods_with_phase(result_file: &'static str, specified_phase: &'static str) -> PodList { let pods_json = file::read_file_to_string(result_file); @@ -617,12 +630,11 @@ mod tests { #[tokio::test] async fn test_handle_pod_error() { let _ = env_logger::builder().is_test(true).try_init(); - let mut pod_watcher = BrokerPodWatcher::new(); trace!("test_handle_pod_error WatchEvent::Error"); pod_watcher .handle_pod( - WatchEvent::Error(ErrorResponse { + WatchEvent::Error(kube::error::ErrorResponse { status: "status".to_string(), message: "message".to_string(), reason: "reason".to_string(), @@ -632,7 +644,6 @@ mod tests { ) .await .unwrap(); - trace!("test_handle_pod_error pod_watcher:{:?}", &pod_watcher); assert_eq!(0, pod_watcher.known_pods.len()); } @@ -1131,6 +1142,8 @@ mod tests { instanceless_pod .metadata .labels + .as_mut() + .unwrap() .remove(AKRI_INSTANCE_LABEL_NAME); assert!(pod_watcher .get_instance_and_configuration_from_pod(&instanceless_pod) @@ -1140,6 +1153,8 @@ mod tests { configurationless_pod .metadata .labels + .as_mut() + .unwrap() .remove(AKRI_CONFIGURATION_LABEL_NAME); assert!(pod_watcher .get_instance_and_configuration_from_pod(&configurationless_pod) @@ -1150,8 +1165,8 @@ mod tests { async fn test_create_or_update_service_successful_update() { let _ = env_logger::builder().is_test(true).try_init(); - let dcc_json = file::read_file_to_string("../test/json/config-a.json"); - let dcc: 
KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); + let config_json = file::read_file_to_string("../test/json/config-a.json"); + let config: Configuration = serde_json::from_str(&config_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); let mut mock = MockKubeInterface::new(); @@ -1180,7 +1195,7 @@ mod tests { AKRI_INSTANCE_LABEL_NAME, "config-a-b494b6", ownership, - &dcc.spec.instance_service_spec.unwrap().clone(), + &config.spec.instance_service_spec.unwrap().clone(), true, &mock, ) @@ -1192,8 +1207,8 @@ mod tests { async fn test_create_or_update_service_failed_update() { let _ = env_logger::builder().is_test(true).try_init(); - let dcc_json = file::read_file_to_string("../test/json/config-a.json"); - let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); + let config_json = file::read_file_to_string("../test/json/config-a.json"); + let config: Configuration = serde_json::from_str(&config_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); let mut mock = MockKubeInterface::new(); @@ -1223,7 +1238,7 @@ mod tests { AKRI_INSTANCE_LABEL_NAME, "config-a-b494b6", ownership, - &dcc.spec.instance_service_spec.unwrap().clone(), + &config.spec.instance_service_spec.unwrap().clone(), true, &mock ) @@ -1235,8 +1250,8 @@ mod tests { async fn test_create_or_update_service_successful_create() { let _ = env_logger::builder().is_test(true).try_init(); - let dcc_json = file::read_file_to_string("../test/json/config-a.json"); - let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); + let config_json = file::read_file_to_string("../test/json/config-a.json"); + let config: Configuration = serde_json::from_str(&config_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); let mut mock = MockKubeInterface::new(); @@ -1267,7 +1282,7 @@ mod tests { AKRI_INSTANCE_LABEL_NAME, "config-a-b494b6", ownership, - &dcc.spec.instance_service_spec.unwrap().clone(), + &config.spec.instance_service_spec.unwrap().clone(), true, &mock, ) @@ -1279,8 
+1294,8 @@ mod tests { async fn test_create_or_update_service_failed_create() { let _ = env_logger::builder().is_test(true).try_init(); - let dcc_json = file::read_file_to_string("../test/json/config-a.json"); - let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); + let config_json = file::read_file_to_string("../test/json/config-a.json"); + let config: Configuration = serde_json::from_str(&config_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); let mut mock = MockKubeInterface::new(); @@ -1291,7 +1306,7 @@ mod tests { false, ); mock.expect_create_service() - .returning(move |_, _| Err(None.ok_or("failure")?)); + .returning(move |_, _| Err(anyhow::anyhow!("Failure"))); let ownership = OwnershipInfo::new( OwnershipType::Instance, @@ -1307,7 +1322,7 @@ mod tests { AKRI_INSTANCE_LABEL_NAME, "config-a-b494b6", ownership, - &dcc.spec.instance_service_spec.unwrap().clone(), + &config.spec.instance_service_spec.unwrap().clone(), true, &mock ) @@ -1466,11 +1481,11 @@ mod tests { fn configure_for_handle_pod(mock: &mut MockKubeInterface, handle_pod: &HandlePod) { if let Some(running) = &handle_pod.running { - configure_for_running_pod_work(mock, &running); + configure_for_running_pod_work(mock, running); } if let Some(ended) = &handle_pod.ended { - configure_for_cleanup_broker_and_configuration_svcs(mock, &ended); + configure_for_cleanup_broker_and_configuration_svcs(mock, ended); } } } diff --git a/controller/src/util/shared_test_utils.rs b/controller/src/util/shared_test_utils.rs index 44654c780..b8425de09 100644 --- a/controller/src/util/shared_test_utils.rs +++ b/controller/src/util/shared_test_utils.rs @@ -2,20 +2,18 @@ pub mod config_for_tests { use akri_shared::{ akri::{ - configuration::KubeAkriConfig, - instance::{Instance, KubeAkriInstance, KubeAkriInstanceList}, + configuration::Configuration, + instance::{Instance, InstanceList, InstanceSpec}, }, k8s::MockKubeInterface, os::file, }; - use k8s_openapi::api::core::v1::{PodSpec, PodStatus, 
ServiceSpec, ServiceStatus}; - use kube::api::{Object, ObjectList}; + use k8s_openapi::api::core::v1::{Pod, Service}; + use kube::api::ObjectList; use log::trace; - pub type PodObject = Object; - pub type PodList = ObjectList; - pub type ServiceObject = Object; - pub type ServiceList = ObjectList; + pub type PodList = ObjectList; + pub type ServiceList = ObjectList; pub fn configure_find_instance( mock: &mut MockKubeInterface, @@ -31,16 +29,18 @@ pub mod config_for_tests { .returning(move |_, _| { if result_error { // Return error that instance could not be found - Err(kube::Error::Api(kube::ErrorResponse { - status: "Failure".to_string(), - message: "instances.akri.sh \"akri-blah-901a7b\" not found".to_string(), - reason: "NotFound".to_string(), - code: akri_shared::k8s::ERROR_NOT_FOUND, - })) + Err(anyhow::anyhow!(kube::Error::Api( + kube::error::ErrorResponse { + status: "Failure".to_string(), + message: "instances.akri.sh \"akri-blah-901a7b\" not found".to_string(), + reason: "NotFound".to_string(), + code: akri_shared::k8s::ERROR_NOT_FOUND, + } + ))) } else { - let dci_json = file::read_file_to_string(result_file); - let dci: KubeAkriInstance = serde_json::from_str(&dci_json).unwrap(); - Ok(dci) + let instance_json = file::read_file_to_string(result_file); + let instance: Instance = serde_json::from_str(&instance_json).unwrap(); + Ok(instance) } }); } @@ -74,14 +74,14 @@ pub mod config_for_tests { } else { json }; - let list: KubeAkriInstanceList = serde_json::from_str(&instance_list_json).unwrap(); + let list: InstanceList = serde_json::from_str(&instance_list_json).unwrap(); Ok(list) }); } pub fn configure_update_instance( mock: &mut MockKubeInterface, - instance_to_update: Instance, + instance_to_update: InstanceSpec, instance_name: &'static str, instance_namespace: &'static str, result_error: bool, @@ -102,7 +102,7 @@ pub mod config_for_tests { }) .returning(move |_, _, _| { if result_error { - Err(None.ok_or("failure")?) 
+ Err(None.ok_or(anyhow::anyhow!("failure"))?) } else { Ok(()) } @@ -122,11 +122,11 @@ pub mod config_for_tests { .withf(move |name, namespace| name == config_name && namespace == config_namespace) .returning(move |_, _| { if result_error { - Err(None.ok_or("failure")?) + Err(None.ok_or(anyhow::anyhow!("failure"))?) } else { - let dcc_json = file::read_file_to_string(result_file); - let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); - Ok(dcc) + let config_json = file::read_file_to_string(result_file); + let config: Configuration = serde_json::from_str(&config_json).unwrap(); + Ok(config) } }); } @@ -143,7 +143,7 @@ pub mod config_for_tests { .withf(move |selector| selector == svc_selector) .returning(move |_| { if result_error { - Err(None.ok_or("failure")?) + Err(None.ok_or(anyhow::anyhow!("failure"))?) } else { let svcs_json = file::read_file_to_string(result_file); let svcs: ServiceList = serde_json::from_str(&svcs_json).unwrap(); @@ -167,18 +167,9 @@ pub mod config_for_tests { ); mock.expect_create_service() .withf(move |svc_to_create, ns| { - svc_to_create - .metadata - .as_ref() - .unwrap() - .name - .as_ref() - .unwrap() - == svc_name + svc_to_create.metadata.name.as_ref().unwrap() == svc_name && svc_to_create .metadata - .as_ref() - .unwrap() .labels .as_ref() .unwrap() @@ -225,7 +216,7 @@ pub mod config_for_tests { .withf(move |_svc, name, namespace| name == svc_name && namespace == svc_namespace) .returning(move |_, _, _| { if result_error { - Err(None.ok_or("failure")?) + Err(None.ok_or(anyhow::anyhow!("failure"))?) } else { Ok(()) } @@ -247,7 +238,7 @@ pub mod config_for_tests { .withf(move |selector| selector == pod_selector) .returning(move |_| { if result_error { - Err(None.ok_or("failure")?) + Err(None.ok_or(anyhow::anyhow!("failure"))?) 
} else { let pods_json = file::read_file_to_string(result_file); let pods: PodList = serde_json::from_str(&pods_json).unwrap(); @@ -267,18 +258,9 @@ pub mod config_for_tests { mock.expect_create_pod() .times(1) .withf(move |pod_to_create, namespace| { - pod_to_create - .metadata - .as_ref() - .unwrap() - .name - .as_ref() - .unwrap() - == pod_name + pod_to_create.metadata.name.as_ref().unwrap() == pod_name && pod_to_create .metadata - .as_ref() - .unwrap() .labels .as_ref() .unwrap() diff --git a/deployment/helm/Chart.yaml b/deployment/helm/Chart.yaml index 0040ba1a1..c7bde958a 100644 --- a/deployment/helm/Chart.yaml +++ b/deployment/helm/Chart.yaml @@ -15,9 +15,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.6.12 +version: 0.6.13 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: 0.6.12 +appVersion: 0.6.13 diff --git a/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml b/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml index 13875d094..66a5c9955 100644 --- a/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml +++ b/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "debug-echo-discovery-handler" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -9,6 +9,6 @@ edition = "2018" [dependencies] akri-discovery-utils = { path = "../../discovery-utils" } akri-debug-echo = { path = "../../discovery-handlers/debug-echo" } -env_logger = "0.8.3" +env_logger = "0.9.0" log = "0.4" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } +tokio = { version = "1.0.1" } diff --git a/discovery-handler-modules/onvif-discovery-handler/Cargo.toml b/discovery-handler-modules/onvif-discovery-handler/Cargo.toml index 7e4cc75b4..fc231bccf 100644 --- a/discovery-handler-modules/onvif-discovery-handler/Cargo.toml +++ b/discovery-handler-modules/onvif-discovery-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "onvif-discovery-handler" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -9,6 +9,6 @@ edition = "2018" [dependencies] akri-discovery-utils = { path = "../../discovery-utils" } akri-onvif = { path = "../../discovery-handlers/onvif" } -env_logger = "0.8.3" +env_logger = "0.9.0" log = "0.4" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } \ No newline at end of file +tokio = { version = "1.0.1" } \ No newline at end of file diff --git a/discovery-handler-modules/opcua-discovery-handler/Cargo.toml b/discovery-handler-modules/opcua-discovery-handler/Cargo.toml index fe8d5dac0..21c9925d7 100644 --- a/discovery-handler-modules/opcua-discovery-handler/Cargo.toml +++ 
b/discovery-handler-modules/opcua-discovery-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opcua-discovery-handler" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -9,6 +9,6 @@ edition = "2018" [dependencies] akri-discovery-utils = { path = "../../discovery-utils" } akri-opcua = { path = "../../discovery-handlers/opcua" } -env_logger = "0.8.3" +env_logger = "0.9.0" log = "0.4" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } \ No newline at end of file +tokio = { version = "1.0.1" } \ No newline at end of file diff --git a/discovery-handler-modules/udev-discovery-handler/Cargo.toml b/discovery-handler-modules/udev-discovery-handler/Cargo.toml index dafbc5f76..7a18fca40 100644 --- a/discovery-handler-modules/udev-discovery-handler/Cargo.toml +++ b/discovery-handler-modules/udev-discovery-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "udev-discovery-handler" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -9,6 +9,6 @@ edition = "2018" [dependencies] akri-discovery-utils = { path = "../../discovery-utils" } akri-udev = { path = "../../discovery-handlers/udev" } -env_logger = "0.8.3" +env_logger = "0.9.0" log = "0.4" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } +tokio = { version = "1.0.1" } diff --git a/discovery-handlers/debug-echo/Cargo.toml b/discovery-handlers/debug-echo/Cargo.toml index 09b8e9d37..5f2d13faf 100644 --- a/discovery-handlers/debug-echo/Cargo.toml +++ b/discovery-handlers/debug-echo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-debug-echo" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -10,15 +10,16 @@ edition = "2018" akri-discovery-utils = { path = "../../discovery-utils" } anyhow = "1.0.38" async-trait = "0.1.0" -env_logger = "0.8.3" +env_logger = "0.9.0" futures-util = "0.3" log = "0.4" 
serde = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" serde_derive = "1.0.104" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } -tonic = {version = "0.1.0", features = ["tls"] } +tokio = { version = "1.0.1", features = ["time", "net", "sync"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = { version = "0.5.2", features = ["tls"] } [dev-dependencies] akri-shared = { path = "../../shared" } \ No newline at end of file diff --git a/discovery-handlers/debug-echo/src/discovery_handler.rs b/discovery-handlers/debug-echo/src/discovery_handler.rs index a5be894c6..b5dd4d39e 100644 --- a/discovery-handlers/debug-echo/src/discovery_handler.rs +++ b/discovery-handlers/debug-echo/src/discovery_handler.rs @@ -8,7 +8,7 @@ use log::{error, info, trace}; use std::time::Duration; use std::{collections::HashMap, fs}; use tokio::sync::mpsc; -use tokio::time::delay_for; +use tokio::time::sleep; use tonic::{Response, Status}; // TODO: make this configurable @@ -55,7 +55,7 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { info!("discover - called for debug echo protocol"); let register_sender = self.register_sender.clone(); let discover_request = request.get_ref(); - let (mut discovered_devices_sender, discovered_devices_receiver) = + let (discovered_devices_sender, discovered_devices_receiver) = mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); let discovery_handler_config: DebugEchoDiscoveryDetails = deserialize_discovery_details(&discover_request.discovery_details) @@ -86,7 +86,7 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { .await { error!("discover - for debugEcho failed to send discovery response with error {}", e); - if let Some(mut sender) = register_sender { + if let Some(sender) = register_sender { sender.send(()).await.unwrap(); } break; @@ -118,16 +118,18 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { { // TODO: consider re-registering here error!("discover - for debugEcho 
failed to send discovery response with error {}", e); - if let Some(mut sender) = register_sender { + if let Some(sender) = register_sender { sender.send(()).await.unwrap(); } break; } } - delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + sleep(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; } }); - Ok(Response::new(discovered_devices_receiver)) + Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new( + discovered_devices_receiver, + ))) } } @@ -172,7 +174,7 @@ mod tests { descriptions: - "foo1" "#; - let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&debug_echo_yaml).unwrap(); + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(debug_echo_yaml).unwrap(); let discovery_handler = DiscoveryHandlerImpl::new(None); let properties: HashMap = [( super::super::DEBUG_ECHO_DESCRIPTION_LABEL.to_string(), @@ -194,6 +196,7 @@ mod tests { .discover(discover_request) .await .unwrap() + .into_inner() .into_inner(); let devices = stream.recv().await.unwrap().unwrap().devices; assert_eq!(1, devices.len()); diff --git a/discovery-handlers/onvif/Cargo.toml b/discovery-handlers/onvif/Cargo.toml index c2dcee6b9..2187ac160 100644 --- a/discovery-handlers/onvif/Cargo.toml +++ b/discovery-handlers/onvif/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-onvif" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -11,10 +11,10 @@ akri-discovery-utils = { path = "../../discovery-utils" } akri-shared = { path = "../../shared" } anyhow = "1.0.38" async-trait = "0.1.0" -bytes = "0.5" -env_logger = "0.8.3" +bytes = "1.0.1" +env_logger = "0.9.0" futures-util = "0.3" -hyper = { version = "0.13.5", package = "hyper" } +hyper = { version = "0.14.11", package = "hyper" } log = "0.4" serde = "1.0.104" serde_json = "1.0.45" @@ -22,12 +22,13 @@ serde_yaml = "0.8.11" serde_derive = "1.0.104" sxd-document = "0.3.0" sxd-xpath = "0.4.0" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", 
"fs", "macros", "uds"] } -tonic = {version = "0.1.0", features = ["tls"] } +tokio = { version = "1.0", features = ["time", "net", "sync"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = { version = "0.5.2", features = ["tls"] } uuid = { version = "0.8.1", features = ["v4"] } xml-rs = { version = "0.8.0" } -yaserde = "0.6.0" -yaserde_derive = "0.6.0" +yaserde = "0.7.1" +yaserde_derive = "0.7.1" [dev-dependencies] -mockall = "0.9.0" \ No newline at end of file +mockall = "0.10.2" \ No newline at end of file diff --git a/discovery-handlers/onvif/src/discovery_handler.rs b/discovery-handlers/onvif/src/discovery_handler.rs index b6302105f..867b4f72a 100644 --- a/discovery-handlers/onvif/src/discovery_handler.rs +++ b/discovery-handlers/onvif/src/discovery_handler.rs @@ -16,7 +16,7 @@ use akri_discovery_utils::{ use async_trait::async_trait; use log::{error, info, trace}; use std::{collections::HashMap, time::Duration}; -use tokio::{sync::mpsc, time::delay_for}; +use tokio::{sync::mpsc, time::sleep}; use tonic::{Response, Status}; // TODO: make this configurable @@ -67,7 +67,7 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { info!("discover - called for ONVIF protocol"); let register_sender = self.register_sender.clone(); let discover_request = request.get_ref(); - let (mut discovered_devices_sender, discovered_devices_receiver) = + let (discovered_devices_sender, discovered_devices_receiver) = mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); let discovery_handler_config: OnvifDiscoveryDetails = deserialize_discovery_details(&discover_request.discovery_details) @@ -124,16 +124,18 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { "discover - for ONVIF failed to send discovery response with error {}", e ); - if let Some(mut sender) = register_sender { + if let Some(sender) = register_sender { sender.send(()).await.unwrap(); } break; } } - delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + 
sleep(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; } }); - Ok(Response::new(discovered_devices_receiver)) + Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new( + discovered_devices_receiver, + ))) } } @@ -207,9 +209,9 @@ mod tests { if let Some(ip_and_mac_) = ip_and_mac { configure_get_device_ip_and_mac_address( mock, - &ip_and_mac_.mock_uri, - &ip_and_mac_.mock_ip, - &ip_and_mac_.mock_mac, + ip_and_mac_.mock_uri, + ip_and_mac_.mock_ip, + ip_and_mac_.mock_mac, ) } } diff --git a/discovery-handlers/onvif/src/discovery_impl.rs b/discovery-handlers/onvif/src/discovery_impl.rs index be8ef0d85..550ab50c4 100644 --- a/discovery-handlers/onvif/src/discovery_impl.rs +++ b/discovery-handlers/onvif/src/discovery_impl.rs @@ -183,7 +183,7 @@ pub mod util { scopes: Option<&FilterList>, ) -> Vec { let response_envelope = - yaserde::de::from_str::(&discovery_response); + yaserde::de::from_str::(discovery_response); // The response envelope follows this format: // // https://10.0.0.1:5357/svc @@ -288,7 +288,7 @@ pub mod util { "get_discovery_response_socket - binding to: {:?}", local_socket_addr ); - let mut socket = UdpSocket::bind(local_socket_addr).await?; + let socket = UdpSocket::bind(local_socket_addr).await?; trace!( "get_discovery_response_socket - joining multicast: {:?} {:?}", &MULTI_IPV4_ADDR, @@ -359,8 +359,7 @@ pub mod util { async fn try_recv_string(s: &mut UdpSocket, timeout: Duration) -> std::io::Result { let mut buf = vec![0; 16 * 1024]; - // TODO: use `try_recv_from` when update tokio - let (len, _src) = time::timeout(timeout, s.recv_from(&mut buf)).await??; + let len = time::timeout(timeout, s.recv(&mut buf)).await??; Ok(String::from_utf8_lossy(&buf[..len]).to_string()) } @@ -372,7 +371,7 @@ pub mod util { time::{Duration, SystemTime}, }; - #[tokio::test(core_threads = 2)] + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_timeout_for_simple_onvif_discover() { let _ = env_logger::builder().is_test(true).try_init(); diff 
--git a/discovery-handlers/onvif/src/discovery_utils.rs b/discovery-handlers/onvif/src/discovery_utils.rs index 29cb10586..184f87a13 100644 --- a/discovery-handlers/onvif/src/discovery_utils.rs +++ b/discovery-handlers/onvif/src/discovery_utils.rs @@ -100,7 +100,7 @@ struct HttpRequest {} impl HttpRequest { /// This converts an http response body into an sxd_document::Package fn handle_request_body(body: &str) -> Result { - let xml_as_tree = match parser::parse(&body) { + let xml_as_tree = match parser::parse(body) { Ok(xml_as_tree) => xml_as_tree, Err(e) => return Err(Error::new(ErrorKind::InvalidData, e).into()), }; @@ -157,7 +157,7 @@ impl Http for HttpRequest { .await? .freeze(); let response_body_str = std::str::from_utf8(&response_body)?; - match HttpRequest::handle_request_body(&response_body_str) { + match HttpRequest::handle_request_body(response_body_str) { Ok(dom) => Ok(dom), Err(e) => Err(Error::new(ErrorKind::InvalidData, e).into()), } @@ -244,7 +244,7 @@ async fn inner_get_device_service_uri( ) -> Result { let services_xml = match http .post( - &url, + url, &get_action(DEVICE_WSDL, "GetServices"), &GET_SERVICES_TEMPLATE.to_string(), ) @@ -296,7 +296,7 @@ async fn inner_get_device_profiles( ) -> Result, anyhow::Error> { let action = get_action(MEDIA_WSDL, "GetProfiles"); let message = GET_PROFILES_TEMPLATE.to_string(); - let profiles_xml = match http.post(&url, &action, &message).await { + let profiles_xml = match http.post(url, &action, &message).await { Ok(xml) => xml, Err(e) => { return Err(anyhow::format_err!( @@ -332,9 +332,9 @@ async fn inner_get_device_profile_streaming_uri( profile_token: &str, http: &impl Http, ) -> Result { - let stream_soap = get_stream_uri_message(&profile_token); + let stream_soap = get_stream_uri_message(profile_token); let stream_uri_xml = match http - .post(&url, &get_action(MEDIA_WSDL, "GetStreamUri"), &stream_soap) + .post(url, &get_action(MEDIA_WSDL, "GetStreamUri"), &stream_soap) .await { Ok(xml) => xml, diff --git 
a/discovery-handlers/opcua/Cargo.toml b/discovery-handlers/opcua/Cargo.toml index cfc8fafd5..eaf1a92b1 100644 --- a/discovery-handlers/opcua/Cargo.toml +++ b/discovery-handlers/opcua/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-opcua" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -11,18 +11,19 @@ akri-discovery-utils = { path = "../../discovery-utils" } akri-shared = { path = "../../shared" } anyhow = "1.0.38" async-trait = "0.1.0" -env_logger = "0.8.3" +env_logger = "0.9.0" futures-util = "0.3" log = "0.4" opcua-client = "0.8.0" -prost = "0.6" +prost = "0.8.0" serde = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" serde_derive = "1.0.1" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } -tonic = {version = "0.1.0", features = ["tls"] } +tokio = { version = "1.0.2", features = ["time", "net", "sync"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = { version = "0.5.2", features = ["tls"] } url = "2.2.0" [dev-dependencies] -mockall = "0.9.0" \ No newline at end of file +mockall = "0.10.2" \ No newline at end of file diff --git a/discovery-handlers/opcua/src/discovery_handler.rs b/discovery-handlers/opcua/src/discovery_handler.rs index a59f22132..628ee8437 100644 --- a/discovery-handlers/opcua/src/discovery_handler.rs +++ b/discovery-handlers/opcua/src/discovery_handler.rs @@ -13,7 +13,7 @@ use async_trait::async_trait; use log::{error, info, trace}; use std::time::Duration; use tokio::sync::mpsc; -use tokio::time::delay_for; +use tokio::time::sleep; use tonic::{Response, Status}; // TODO: make this configurable @@ -79,7 +79,7 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { info!("discover - called for OPC UA protocol"); let register_sender = self.register_sender.clone(); let discover_request = request.get_ref(); - let (mut discovered_devices_sender, discovered_devices_receiver) = + let (discovered_devices_sender, 
discovered_devices_receiver) = mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); let discovery_handler_config: OpcuaDiscoveryDetails = deserialize_discovery_details(&discover_request.discovery_details) @@ -141,16 +141,18 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { "discover - for OPC UA failed to send discovery response with error {}", e ); - if let Some(mut sender) = register_sender { + if let Some(sender) = register_sender { sender.send(()).await.unwrap(); } break; } } - delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + sleep(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; } }); - Ok(Response::new(discovered_devices_receiver)) + Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new( + discovered_devices_receiver, + ))) } } @@ -165,7 +167,7 @@ mod tests { opcuaDiscoveryMethod: standard: {} "#; - let dh_config: OpcuaDiscoveryDetails = deserialize_discovery_details(&yaml).unwrap(); + let dh_config: OpcuaDiscoveryDetails = deserialize_discovery_details(yaml).unwrap(); let serialized = serde_json::to_string(&dh_config).unwrap(); let expected_deserialized = r#"{"opcuaDiscoveryMethod":{"standard":{"discoveryUrls":["opc.tcp://localhost:4840/"]}}}"#; assert_eq!(expected_deserialized, serialized); @@ -184,7 +186,7 @@ mod tests { items: - "Some application name" "#; - let dh_config: OpcuaDiscoveryDetails = deserialize_discovery_details(&yaml).unwrap(); + let dh_config: OpcuaDiscoveryDetails = deserialize_discovery_details(yaml).unwrap(); let serialized = serde_json::to_string(&dh_config).unwrap(); let expected_serialized = r#"{"opcuaDiscoveryMethod":{"standard":{"discoveryUrls":["opc.tcp://127.0.0.1:4855/"]}},"applicationNames":{"items":["Some application name"],"action":"Include"}}"#; assert_eq!(expected_serialized, serialized); diff --git a/discovery-handlers/opcua/src/discovery_impl.rs b/discovery-handlers/opcua/src/discovery_impl.rs index 262421ee5..9a7129a23 100644 --- a/discovery-handlers/opcua/src/discovery_impl.rs +++ 
b/discovery-handlers/opcua/src/discovery_impl.rs @@ -161,7 +161,7 @@ fn get_discovery_url_from_application_description( /// This returns a socket address for the OPC UA DiscoveryURL else an error if not properly formatted fn get_socket_addr(url: &str) -> Result { - let url = Url::parse(&url).map_err(|_| anyhow::format_err!("could not parse url"))?; + let url = Url::parse(url).map_err(|_| anyhow::format_err!("could not parse url"))?; if url.scheme() != OPC_TCP_SCHEME { return Err(anyhow::format_err!( "format of OPC UA url {} is not valid", diff --git a/discovery-handlers/udev/Cargo.toml b/discovery-handlers/udev/Cargo.toml index 017a1bcdb..cb3c6e278 100644 --- a/discovery-handlers/udev/Cargo.toml +++ b/discovery-handlers/udev/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-udev" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = "2018" @@ -10,20 +10,21 @@ edition = "2018" akri-discovery-utils = { path = "../../discovery-utils" } anyhow = "1.0.38" async-trait = "0.1.0" -env_logger = "0.8.3" +env_logger = "0.9.0" futures-util = "0.3" log = "0.4" pest = "2.0" pest_derive = "2.0" -prost = "0.6" +prost = "0.8.0" regex = "1" serde = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" serde_derive = "1.0.104" -tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } -tonic = {version = "0.1.0", features = ["tls"] } +tokio = { version = "1.0", features = ["time", "net", "sync"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = { version = "0.5.2", features = ["tls"] } udev = "0.5" [dev-dependencies] -mockall = "0.9.0" \ No newline at end of file +mockall = "0.10.2" \ No newline at end of file diff --git a/discovery-handlers/udev/src/discovery_handler.rs b/discovery-handlers/udev/src/discovery_handler.rs index ee217f334..9b12ec97b 100644 --- a/discovery-handlers/udev/src/discovery_handler.rs +++ b/discovery-handlers/udev/src/discovery_handler.rs @@ -12,7 +12,7 @@ use 
log::{error, info, trace}; use std::collections::HashSet; use std::time::Duration; use tokio::sync::mpsc; -use tokio::time::delay_for; +use tokio::time::sleep; use tonic::{Response, Status}; // TODO: make this configurable @@ -47,7 +47,7 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { info!("discover - called for udev protocol"); let register_sender = self.register_sender.clone(); let discover_request = request.get_ref(); - let (mut discovered_devices_sender, discovered_devices_receiver) = + let (discovered_devices_sender, discovered_devices_receiver) = mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); let discovery_handler_config: UdevDiscoveryDetails = deserialize_discovery_details(&discover_request.discovery_details) @@ -60,7 +60,7 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { let mut devpaths: HashSet = HashSet::new(); udev_rules.iter().for_each(|rule| { let enumerator = udev_enumerator::create_enumerator(); - let paths = do_parse_and_find(enumerator, &rule).unwrap(); + let paths = do_parse_and_find(enumerator, rule).unwrap(); paths.into_iter().for_each(|path| { devpaths.insert(path); }); @@ -112,16 +112,18 @@ impl DiscoveryHandler for DiscoveryHandlerImpl { "discover - for udev failed to send discovery response with error {}", e ); - if let Some(mut sender) = register_sender { + if let Some(sender) = register_sender { sender.send(()).await.unwrap(); } break; } } - delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + sleep(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; } }); - Ok(Response::new(discovered_devices_receiver)) + Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new( + discovered_devices_receiver, + ))) } } diff --git a/discovery-utils/Cargo.toml b/discovery-utils/Cargo.toml index 5a163fbf8..4201e19bb 100644 --- a/discovery-utils/Cargo.toml +++ b/discovery-utils/Cargo.toml @@ -1,25 +1,27 @@ [package] name = "akri-discovery-utils" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring "] edition = 
"2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -prost = "0.6" -tonic = { version = "0.1.0", features = ["tls"] } akri-shared = { path = "../shared" } +async-stream = "0.3" anyhow = "1.0.38" async-trait = { version = "0.1.0", optional = true } futures = { version = "0.3.1", package = "futures" } log = "0.4" +prost = "0.8" serde = "1.0" serde_derive = "1.0" serde_yaml = "0.8.11" tempfile = { version = "3.1.0", optional = true } -tokio = { version = "0.2", features = ["time", "net", "sync"] } -tower = "0.3" +tokio = { version = "1.0.1", features = ["time", "net", "sync"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = { version = "0.5.2", features = ["tls"] } +tower = "0.4.8" [features] mock-discovery-handler = ["async-trait", "tempfile"] @@ -29,4 +31,4 @@ async-trait = "0.1.0" tempfile = "3.1.0" [build-dependencies] -tonic-build = "0.1.1" \ No newline at end of file +tonic-build = "0.5.2" \ No newline at end of file diff --git a/discovery-utils/src/discovery/mod.rs b/discovery-utils/src/discovery/mod.rs index 46e038ef0..25dbbbe79 100644 --- a/discovery-utils/src/discovery/mod.rs +++ b/discovery-utils/src/discovery/mod.rs @@ -2,7 +2,8 @@ pub mod v0; /// Definition of the DiscoverStream type expected for supported embedded Akri DiscoveryHandlers -pub type DiscoverStream = tokio::sync::mpsc::Receiver>; +pub type DiscoverStream = + tokio_stream::wrappers::ReceiverStream>; pub mod discovery_handler { use super::super::registration_client::{ @@ -117,7 +118,7 @@ pub mod mock_discovery_handler { &self, _: tonic::Request, ) -> Result, tonic::Status> { - let (mut discovered_devices_sender, discovered_devices_receiver) = + let (discovered_devices_sender, discovered_devices_receiver) = mpsc::channel(super::discovery_handler::DISCOVERED_DEVICES_CHANNEL_CAPACITY); let devices = self.devices.clone(); tokio::spawn(async move { @@ -132,7 +133,9 @@ pub mod mock_discovery_handler { "mock 
discovery handler error", )) } else { - Ok(tonic::Response::new(discovered_devices_receiver)) + Ok(tonic::Response::new( + tokio_stream::wrappers::ReceiverStream::new(discovered_devices_receiver), + )) } } } @@ -189,7 +192,7 @@ pub mod mock_discovery_handler { pub mod server { use super::v0::discovery_handler_server::{DiscoveryHandler, DiscoveryHandlerServer}; use akri_shared::uds::unix_stream; - use futures::stream::TryStreamExt; + use futures::TryFutureExt; use log::info; use std::path::Path; use tokio::net::UnixListener; @@ -221,10 +224,18 @@ pub mod server { tokio::fs::create_dir_all(Path::new(discovery_endpoint).parent().unwrap()).await?; // Delete socket if it already exists std::fs::remove_file(discovery_endpoint).unwrap_or(()); - let mut uds = UnixListener::bind(discovery_endpoint)?; + let incoming = { + let uds = UnixListener::bind(discovery_endpoint)?; + + async_stream::stream! { + while let item = uds.accept().map_ok(|(st, _)| unix_stream::UnixStream(st)).await { + yield item; + } + } + }; Server::builder() .add_service(DiscoveryHandlerServer::new(discovery_handler)) - .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) + .serve_with_incoming(incoming) .await?; std::fs::remove_file(discovery_endpoint).unwrap_or(()); } else { diff --git a/discovery-utils/src/discovery/v0.rs b/discovery-utils/src/discovery/v0.rs index 699178fa5..f21bb2105 100644 --- a/discovery-utils/src/discovery/v0.rs +++ b/discovery-utils/src/discovery/v0.rs @@ -3,10 +3,10 @@ pub struct RegisterDiscoveryHandlerRequest { /// Name of the `DiscoveryHandler`. This name is specified in an /// Akri Configuration, to request devices discovered by this `DiscoveryHandler`. 
#[prost(string, tag = "1")] - pub name: std::string::String, + pub name: ::prost::alloc::string::String, /// Endpoint for the registering `DiscoveryHandler` #[prost(string, tag = "2")] - pub endpoint: std::string::String, + pub endpoint: ::prost::alloc::string::String, #[prost( enumeration = "register_discovery_handler_request::EndpointType", tag = "3" @@ -17,6 +17,7 @@ pub struct RegisterDiscoveryHandlerRequest { #[prost(bool, tag = "4")] pub shared: bool, } +/// Nested message and enum types in `RegisterDiscoveryHandlerRequest`. pub mod register_discovery_handler_request { /// Specifies the type of endpoint. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] @@ -33,30 +34,31 @@ pub struct DiscoverRequest { /// String containing all the details (such as filtering options) /// the `DiscoveryHandler` needs to find a set of devices. #[prost(string, tag = "1")] - pub discovery_details: std::string::String, + pub discovery_details: ::prost::alloc::string::String, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct DiscoverResponse { /// List of discovered devices #[prost(message, repeated, tag = "1")] - pub devices: ::std::vec::Vec, + pub devices: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Device { /// Identifier for this device #[prost(string, tag = "1")] - pub id: std::string::String, + pub id: ::prost::alloc::string::String, /// Properties that identify the device. These are stored in the device's instance /// and set as environment variables in the device's broker Pods. May be information /// about where to find the device such as an RTSP URL or a device node (e.g. 
`/dev/video1`) #[prost(map = "string, string", tag = "2")] - pub properties: ::std::collections::HashMap, + pub properties: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// Optionally specify mounts for Pods that request this device as a resource #[prost(message, repeated, tag = "3")] - pub mounts: ::std::vec::Vec, + pub mounts: ::prost::alloc::vec::Vec, /// Optionally specify device information to be mounted for Pods that request this device as a resource #[prost(message, repeated, tag = "4")] - pub device_specs: ::std::vec::Vec, + pub device_specs: ::prost::alloc::vec::Vec, } /// From Device Plugin API /// Mount specifies a host volume to mount into a container. @@ -65,10 +67,10 @@ pub struct Device { pub struct Mount { /// Path of the mount within the container. #[prost(string, tag = "1")] - pub container_path: std::string::String, + pub container_path: ::prost::alloc::string::String, /// Path of the mount on the host. #[prost(string, tag = "2")] - pub host_path: std::string::String, + pub host_path: ::prost::alloc::string::String, /// If set, the mount is read-only. #[prost(bool, tag = "3")] pub read_only: bool, @@ -79,23 +81,24 @@ pub struct Mount { pub struct DeviceSpec { /// Path of the device within the container. #[prost(string, tag = "1")] - pub container_path: std::string::String, + pub container_path: ::prost::alloc::string::String, /// Path of the device on the host. #[prost(string, tag = "2")] - pub host_path: std::string::String, + pub host_path: ::prost::alloc::string::String, /// Cgroups permissions of the device, candidates are one or more of /// * r - allows container to read from the specified device. /// * w - allows container to write to the specified device. /// * m - allows container to create device files that do not yet exist. 
#[prost(string, tag = "3")] - pub permissions: std::string::String, + pub permissions: ::prost::alloc::string::String, } #[doc = r" Generated client implementations."] pub mod registration_client { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " Registration is the service advertised by the Akri Agent."] #[doc = " Any `DiscoveryHandler` can register with the Akri Agent."] + #[derive(Debug, Clone)] pub struct RegistrationClient { inner: tonic::client::Grpc, } @@ -113,17 +116,43 @@ pub mod registration_client { impl RegistrationClient where T: tonic::client::GrpcService, - T::ResponseBody: Body + HttpBody + Send + 'static, + T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into, - ::Error: Into + Send, + ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); - Self { inner } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> RegistrationClient> + where + F: tonic::service::Interceptor, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + RegistrationClient::new(InterceptedService::new(inner, interceptor)) + } + #[doc = r" Compress requests with `gzip`."] + #[doc = r""] + #[doc = r" This requires the server to support it otherwise it might respond with an"] + #[doc = r" error."] + pub fn send_gzip(mut self) -> Self { + self.inner = self.inner.send_gzip(); + self + } + #[doc = r" Enable decompressing responses with `gzip`."] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self } pub async fn register_discovery_handler( &mut self, @@ -141,18 +170,12 @@ pub mod registration_client { 
self.inner.unary(request.into_request(), path, codec).await } } - impl Clone for RegistrationClient { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } - } } #[doc = r" Generated client implementations."] pub mod discovery_handler_client { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; + #[derive(Debug, Clone)] pub struct DiscoveryHandlerClient { inner: tonic::client::Grpc, } @@ -170,17 +193,43 @@ pub mod discovery_handler_client { impl DiscoveryHandlerClient where T: tonic::client::GrpcService, - T::ResponseBody: Body + HttpBody + Send + 'static, + T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into, - ::Error: Into + Send, + ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); - Self { inner } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DiscoveryHandlerClient> + where + F: tonic::service::Interceptor, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + DiscoveryHandlerClient::new(InterceptedService::new(inner, interceptor)) + } + #[doc = r" Compress requests with `gzip`."] + #[doc = r""] + #[doc = r" This requires the server to support it otherwise it might respond with an"] + #[doc = r" error."] + pub fn send_gzip(mut self) -> Self { + self.inner = self.inner.send_gzip(); + self + } + #[doc = r" Enable decompressing responses with `gzip`."] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self } pub async fn discover( &mut self, @@ -200,17 +249,10 @@ pub mod discovery_handler_client { .await } } - impl Clone for DiscoveryHandlerClient { - fn clone(&self) -> Self { - Self 
{ - inner: self.inner.clone(), - } - } - } } #[doc = r" Generated server implementations."] pub mod registration_server { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = "Generated trait containing gRPC methods that should be implemented for use with RegistrationServer."] #[async_trait] @@ -223,34 +265,46 @@ pub mod registration_server { #[doc = " Registration is the service advertised by the Akri Agent."] #[doc = " Any `DiscoveryHandler` can register with the Akri Agent."] #[derive(Debug)] - #[doc(hidden)] pub struct RegistrationServer { inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), } - struct _Inner(Arc, Option); + struct _Inner(Arc); impl RegistrationServer { pub fn new(inner: T) -> Self { let inner = Arc::new(inner); - let inner = _Inner(inner, None); - Self { inner } + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = Arc::new(inner); - let inner = _Inner(inner, Some(interceptor.into())); - Self { inner } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) } } - impl Service> for RegistrationServer { + impl tonic::codegen::Service> for RegistrationServer + where + T: Registration, + B: Body + Send + Sync + 'static, + B::Error: Into + Send + 'static, + { type Response = http::Response; type Error = Never; type Future = BoxFuture; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: http::Request) -> Self::Future { + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { 
"/v0.Registration/RegisterDiscoveryHandler" => { + #[allow(non_camel_case_types)] struct RegisterDiscoveryHandlerSvc(pub Arc); impl tonic::server::UnaryService @@ -264,21 +318,21 @@ pub mod registration_server { ) -> Self::Future { let inner = self.0.clone(); let fut = - async move { inner.register_discovery_handler(request).await }; + async move { (*inner).register_discovery_handler(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1.clone(); let inner = inner.0; let method = RegisterDiscoveryHandlerSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -288,7 +342,8 @@ pub mod registration_server { Ok(http::Response::builder() .status(200) .header("grpc-status", "12") - .body(tonic::body::BoxBody::empty()) + .header("content-type", "application/grpc") + .body(empty_body()) .unwrap()) }), } @@ -297,12 +352,16 @@ pub mod registration_server { impl Clone for RegistrationServer { fn clone(&self) -> Self { let inner = self.inner.clone(); - Self { inner } + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone(), self.1.clone()) + Self(self.0.clone()) } } impl std::fmt::Debug for _Inner { @@ -316,13 +375,13 @@ pub mod registration_server { } #[doc = r" Generated server implementations."] pub mod discovery_handler_server { - 
#![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = "Generated trait containing gRPC methods that should be implemented for use with DiscoveryHandlerServer."] #[async_trait] pub trait DiscoveryHandler: Send + Sync + 'static { #[doc = "Server streaming response type for the Discover method."] - type DiscoverStream: Stream> + type DiscoverStream: futures_core::Stream> + Send + Sync + 'static; @@ -332,34 +391,46 @@ pub mod discovery_handler_server { ) -> Result, tonic::Status>; } #[derive(Debug)] - #[doc(hidden)] pub struct DiscoveryHandlerServer { inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), } - struct _Inner(Arc, Option); + struct _Inner(Arc); impl DiscoveryHandlerServer { pub fn new(inner: T) -> Self { let inner = Arc::new(inner); - let inner = _Inner(inner, None); - Self { inner } + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = Arc::new(inner); - let inner = _Inner(inner, Some(interceptor.into())); - Self { inner } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) } } - impl Service> for DiscoveryHandlerServer { + impl tonic::codegen::Service> for DiscoveryHandlerServer + where + T: DiscoveryHandler, + B: Body + Send + Sync + 'static, + B::Error: Into + Send + 'static, + { type Response = http::Response; type Error = Never; type Future = BoxFuture; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: http::Request) -> Self::Future { + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { 
"/v0.DiscoveryHandler/Discover" => { + #[allow(non_camel_case_types)] struct DiscoverSvc(pub Arc); impl tonic::server::ServerStreamingService @@ -374,21 +445,21 @@ pub mod discovery_handler_server { request: tonic::Request, ) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.discover(request).await }; + let fut = async move { (*inner).discover(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1; let inner = inner.0; let method = DiscoverSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - }; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.server_streaming(method, req).await; Ok(res) }; @@ -398,7 +469,8 @@ pub mod discovery_handler_server { Ok(http::Response::builder() .status(200) .header("grpc-status", "12") - .body(tonic::body::BoxBody::empty()) + .header("content-type", "application/grpc") + .body(empty_body()) .unwrap()) }), } @@ -407,12 +479,16 @@ pub mod discovery_handler_server { impl Clone for DiscoveryHandlerServer { fn clone(&self) -> Self { let inner = self.inner.clone(); - Self { inner } + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone(), self.1.clone()) + Self(self.0.clone()) } } impl std::fmt::Debug for _Inner { diff --git a/discovery-utils/src/registration_client.rs b/discovery-utils/src/registration_client.rs index 7c7fc1231..44c50b5e4 100644 --- 
a/discovery-utils/src/registration_client.rs +++ b/discovery-utils/src/registration_client.rs @@ -26,7 +26,7 @@ pub async fn register_discovery_handler( break; } trace!("register_discovery_handler - sleeping for 10 seconds and trying again"); - tokio::time::delay_for(std::time::Duration::from_secs(10)).await; + tokio::time::sleep(std::time::Duration::from_secs(10)).await; } Ok(()) } diff --git a/samples/brokers/udev-video-broker/Cargo.toml b/samples/brokers/udev-video-broker/Cargo.toml index 067c5abc8..87d73d6f5 100644 --- a/samples/brokers/udev-video-broker/Cargo.toml +++ b/samples/brokers/udev-video-broker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "udev-video-broker" -version = "0.6.12" +version = "0.6.13" authors = ["Kate Goldenring ", ""] edition = "2018" @@ -8,15 +8,14 @@ edition = "2018" [dependencies] akri-shared = { path = "../../../shared" } -env_logger = "0.8.3" -futures = { version = "0.1", package = "futures" } +env_logger = "0.9.0" lazy_static = "1.4" log = "0.4.3" prometheus = { version = "0.12.0", features = ["process"] } -prost = "0.6" -tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] } -tonic = "0.1" +prost = "0.8.0" +tokio = { version = "1.0.1", features = ["time", "fs", "macros", "signal"] } +tonic = "0.5.2" rscam = "0.5.5" [build-dependencies] -tonic-build = "0.1.1" \ No newline at end of file +tonic-build = "0.5.2" \ No newline at end of file diff --git a/samples/brokers/udev-video-broker/src/main.rs b/samples/brokers/udev-video-broker/src/main.rs index 529a550eb..407186b68 100644 --- a/samples/brokers/udev-video-broker/src/main.rs +++ b/samples/brokers/udev-video-broker/src/main.rs @@ -3,14 +3,11 @@ mod util; extern crate lazy_static; use akri_shared::{ akri::{metrics::run_metrics_server, API_NAMESPACE}, - os::{ - env_var::{ActualEnvVarQuery, EnvVarQuery}, - signal, - }, + os::env_var::{ActualEnvVarQuery, EnvVarQuery}, }; -use futures::Future; use log::{info, trace}; use prometheus::IntCounter; 
+use tokio::signal; use util::{camera_capturer, camera_service}; lazy_static! { @@ -36,13 +33,6 @@ async fn main() -> Result<(), Box run_metrics_server().await.unwrap(); }); - // Set up shutdown channel - let (exit_tx, exit_rx) = std::sync::mpsc::channel::<()>(); - let _shutdown_signal = signal::shutdown().then(|_| { - trace!("{} Udev Broker shutdown signal received", API_NAMESPACE); - exit_tx.send(()) - }); - let env_var_query = ActualEnvVarQuery {}; let devnode = get_video_devnode(&env_var_query); @@ -51,9 +41,9 @@ async fn main() -> Result<(), Box .await .unwrap(); - trace!("Waiting for shutdown signal"); - // wait for exit signal - exit_rx.recv().unwrap(); + trace!("Waiting for ctrl C shutdown signal"); + // Wait for exit signal + signal::ctrl_c().await?; trace!("Udev broker ending"); Ok(()) diff --git a/samples/brokers/udev-video-broker/src/util/camera.rs b/samples/brokers/udev-video-broker/src/util/camera.rs index dd8ecb32b..58b176209 100644 --- a/samples/brokers/udev-video-broker/src/util/camera.rs +++ b/samples/brokers/udev-video-broker/src/util/camera.rs @@ -2,15 +2,16 @@ pub struct NotifyRequest {} #[derive(Clone, PartialEq, ::prost::Message)] pub struct NotifyResponse { - #[prost(bytes, tag = "1")] - pub frame: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub frame: ::prost::alloc::vec::Vec, #[prost(string, tag = "2")] - pub camera: std::string::String, + pub camera: ::prost::alloc::string::String, } #[doc = r" Generated client implementations."] pub mod camera_client { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; + #[derive(Debug, Clone)] pub struct CameraClient { inner: tonic::client::Grpc, } @@ -28,17 +29,43 @@ pub mod camera_client { impl CameraClient where T: tonic::client::GrpcService, - T::ResponseBody: Body + HttpBody + Send + 'static, + T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into, - ::Error: Into + Send, + 
::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); - Self { inner } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CameraClient> + where + F: tonic::service::Interceptor, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + CameraClient::new(InterceptedService::new(inner, interceptor)) + } + #[doc = r" Compress requests with `gzip`."] + #[doc = r""] + #[doc = r" This requires the server to support it otherwise it might respond with an"] + #[doc = r" error."] + pub fn send_gzip(mut self) -> Self { + self.inner = self.inner.send_gzip(); + self + } + #[doc = r" Enable decompressing responses with `gzip`."] + pub fn accept_gzip(mut self) -> Self { + self.inner = self.inner.accept_gzip(); + self } pub async fn get_frame( &mut self, @@ -55,17 +82,10 @@ pub mod camera_client { self.inner.unary(request.into_request(), path, codec).await } } - impl Clone for CameraClient { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } - } } #[doc = r" Generated server implementations."] pub mod camera_server { - #![allow(unused_variables, dead_code, missing_docs)] + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = "Generated trait containing gRPC methods that should be implemented for use with CameraServer."] #[async_trait] @@ -76,34 +96,46 @@ pub mod camera_server { ) -> Result, tonic::Status>; } #[derive(Debug)] - #[doc(hidden)] pub struct CameraServer { inner: _Inner, + accept_compression_encodings: (), + send_compression_encodings: (), } - struct _Inner(Arc, Option); + struct _Inner(Arc); impl CameraServer { pub fn new(inner: T) -> Self { let inner = Arc::new(inner); - let inner = 
_Inner(inner, None); - Self { inner } + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } } - pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { - let inner = Arc::new(inner); - let inner = _Inner(inner, Some(interceptor.into())); - Self { inner } + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) } } - impl Service> for CameraServer { + impl tonic::codegen::Service> for CameraServer + where + T: Camera, + B: Body + Send + Sync + 'static, + B::Error: Into + Send + 'static, + { type Response = http::Response; type Error = Never; type Future = BoxFuture; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: http::Request) -> Self::Future { + fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/camera.Camera/GetFrame" => { + #[allow(non_camel_case_types)] struct GetFrameSvc(pub Arc); impl tonic::server::UnaryService for GetFrameSvc { type Response = super::NotifyResponse; @@ -113,21 +145,21 @@ pub mod camera_server { request: tonic::Request, ) -> Self::Future { let inner = self.0.clone(); - let fut = async move { inner.get_frame(request).await }; + let fut = async move { (*inner).get_frame(request).await }; Box::pin(fut) } } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; let inner = self.inner.clone(); let fut = async move { - let interceptor = inner.1.clone(); let inner = inner.0; let method = GetFrameSvc(inner); let codec = tonic::codec::ProstCodec::default(); - let mut grpc = if let Some(interceptor) = interceptor { - tonic::server::Grpc::with_interceptor(codec, interceptor) - } else { - tonic::server::Grpc::new(codec) - 
}; + let mut grpc = tonic::server::Grpc::new(codec).apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -137,7 +169,8 @@ pub mod camera_server { Ok(http::Response::builder() .status(200) .header("grpc-status", "12") - .body(tonic::body::BoxBody::empty()) + .header("content-type", "application/grpc") + .body(empty_body()) .unwrap()) }), } @@ -146,12 +179,16 @@ pub mod camera_server { impl Clone for CameraServer { fn clone(&self) -> Self { let inner = self.inner.clone(); - Self { inner } + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone(), self.1.clone()) + Self(self.0.clone()) } } impl std::fmt::Debug for _Inner { diff --git a/samples/brokers/udev-video-broker/src/util/camera_capturer.rs b/samples/brokers/udev-video-broker/src/util/camera_capturer.rs index 90e162308..3082ed988 100644 --- a/samples/brokers/udev-video-broker/src/util/camera_capturer.rs +++ b/samples/brokers/udev-video-broker/src/util/camera_capturer.rs @@ -41,9 +41,9 @@ pub fn build_and_start_camera_capturer(devnode: &str) -> RsCamera { .collect(); let format_string = get_format(&env_var_query, format_options); let format = format_string[..].as_bytes(); - let resolution_info = camera_capturer.resolutions(&format).unwrap(); + let resolution_info = camera_capturer.resolutions(format).unwrap(); let resolution = get_resolution(&env_var_query, resolution_info); - let interval_info = camera_capturer.intervals(&format, resolution).unwrap(); + let interval_info = camera_capturer.intervals(format, resolution).unwrap(); let interval = get_interval(&env_var_query, interval_info); trace!("build_and_start_camera_capturer - before starting camera"); camera_capturer diff --git a/samples/brokers/udev-video-broker/src/util/camera_service.rs 
b/samples/brokers/udev-video-broker/src/util/camera_service.rs index 79e491678..88bf31a2b 100644 --- a/samples/brokers/udev-video-broker/src/util/camera_service.rs +++ b/samples/brokers/udev-video-broker/src/util/camera_service.rs @@ -98,7 +98,7 @@ pub async fn serve(devnode: &str, camera_capturer: RsCamera) -> Result<(), Strin } Err(e) => { trace!("Unable to connect to server, continue polling: {:?}", e); - tokio::time::delay_for(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; false } }; diff --git a/shared/Cargo.toml b/shared/Cargo.toml index d508a9360..43c120b09 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "akri-shared" -version = "0.6.12" +version = "0.6.13" authors = [""] edition = "2018" @@ -11,22 +11,22 @@ async-trait = "0.1.0" either = '*' anyhow = "1.0.38" futures = "0.3.1" -futures-old = { version = "0.1", package = "futures" } futures-util = "0.3" -env_logger = "0.8.3" -kube = { version = "0.23.0", features = ["openapi"] } -k8s-openapi = { version = "0.6.0", features = ["v1_16"] } +env_logger = "0.9.0" +hyper = { version = "0.14.2", package = "hyper" } +kube = { version = "0.59.0", features = ["derive"] } +kube-runtime = "0.59.0" +k8s-openapi = { version = "0.13.0", default-features = false, features = ["schemars", "v1_16"] } log = "0.4" -mockall = "0.9.0" +mockall = "0.10.2" prometheus = { version = "0.11.0", features = ["process"] } rand = "0.8.3" +schemars = "0.8.0" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" serde_yaml = "0.8" -tokio = { version = "0.2", features = ["full"] } -tokio-core = "0.1" -tokio-signal = "0.2" -tonic = "0.1" -tower = "0.3" -warp = "0.2" +tokio = { version = "1.0.1", features = ["full"] } +tonic = "0.5.2" +tower = "0.4.8" +warp = "0.3.1" diff --git a/shared/src/akri/configuration.rs b/shared/src/akri/configuration.rs index 7b917076e..1c9cb5ca2 100644 --- a/shared/src/akri/configuration.rs +++ b/shared/src/akri/configuration.rs @@ -4,24 +4,22 
@@ // in favor of camelCase) // #![allow(non_camel_case_types)] - -use super::API_CONFIGURATIONS; -use super::API_NAMESPACE; -use super::API_VERSION; use k8s_openapi::api::core::v1::PodSpec; use k8s_openapi::api::core::v1::ServiceSpec; +use kube::CustomResource; use kube::{ - api::{ListParams, Object, ObjectList, RawApi, Void}, - client::APIClient, + api::{Api, ListParams, ObjectList}, + client::Client, }; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; -pub type KubeAkriConfig = Object; -pub type KubeAkriConfigList = ObjectList>; +pub type ConfigurationList = ObjectList; /// This specifies which `DiscoveryHandler` should be used for discovery /// and any details that need to be sent to the `DiscoveryHandler`. -#[derive(Serialize, Deserialize, Clone, Debug)] +#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct DiscoveryHandlerInfo { pub name: String, @@ -36,9 +34,12 @@ pub struct DiscoveryHandlerInfo { /// capabilities. For any specific capability found that is described by this /// configuration, an Instance /// is created. -#[derive(Serialize, Deserialize, Clone, Debug)] +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, JsonSchema)] +// group = API_NAMESPACE and version = API_VERSION +#[kube(group = "akri.sh", version = "v0", kind = "Configuration", namespaced)] +#[kube(apiextensions = "v1")] #[serde(rename_all = "camelCase")] -pub struct Configuration { +pub struct ConfigurationSpec { /// This defines the `DiscoveryHandler` that should be used to /// discover the capability and any information needed by the `DiscoveryHandler`. 
pub discovery_handler: DiscoveryHandlerInfo, @@ -84,35 +85,22 @@ pub struct Configuration { /// /// ```no_run /// use akri_shared::akri::configuration; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); -/// let dccs = configuration::get_configurations(&api_client).await.unwrap(); +/// let api_client = Client::try_default().await.unwrap(); +/// let configs = configuration::get_configurations(&api_client).await.unwrap(); /// # } /// ``` -pub async fn get_configurations( - kube_client: &APIClient, -) -> Result> { - log::trace!("get_configurations enter"); - let akri_config_type = RawApi::customResource(API_CONFIGURATIONS) - .group(API_NAMESPACE) - .version(API_VERSION); - - log::trace!("get_configurations kube_client.request::(akri_config_type.list(...)?).await?"); - - let dcc_list_params = ListParams { - ..Default::default() - }; - match kube_client - .request::(akri_config_type.list(&dcc_list_params)?) 
- .await - { - Ok(configs_retrieved) => { +pub async fn get_configurations(kube_client: &Client) -> Result { + let configurations_client: Api = Api::all(kube_client.clone()); + let lp = ListParams::default(); + match configurations_client.list(&lp).await { + Ok(configurations_retrieved) => { log::trace!("get_configurations return"); - Ok(configs_retrieved) + Ok(configurations_retrieved) } Err(kube::Error::Api(ae)) => { log::trace!( @@ -134,14 +122,14 @@ pub async fn get_configurations( /// /// ```no_run /// use akri_shared::akri::configuration; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); -/// let dcc = configuration::find_configuration( -/// "dcc-1", +/// let api_client = Client::try_default().await.unwrap(); +/// let config = configuration::find_configuration( +/// "config-1", /// "default", /// &api_client).await.unwrap(); /// # } @@ -149,38 +137,33 @@ pub async fn get_configurations( pub async fn find_configuration( name: &str, namespace: &str, - kube_client: &APIClient, -) -> Result> { + kube_client: &Client, +) -> Result { log::trace!("find_configuration enter"); - let akri_config_type = RawApi::customResource(API_CONFIGURATIONS) - .group(API_NAMESPACE) - .version(API_VERSION) - .within(&namespace); + let configurations_client: Api = Api::namespaced(kube_client.clone(), namespace); - log::trace!("find_configuration kube_client.request::(akri_config_type.get(...)?).await?"); + log::trace!("find_configuration getting instance with name {}", name); - match kube_client - .request::(akri_config_type.get(&name)?) 
- .await - { - Ok(config_retrieved) => { + match configurations_client.get(name).await { + Ok(configuration_retrieved) => { log::trace!("find_configuration return"); - Ok(config_retrieved) - } - Err(kube::Error::Api(ae)) => { - log::trace!( - "find_configuration kube_client.request returned kube error: {:?}", - ae - ); - Err(ae.into()) - } - Err(e) => { - log::trace!("find_configuration kube_client.request error: {:?}", e); - Err(e.into()) + Ok(configuration_retrieved) } + Err(e) => match e { + kube::Error::Api(ae) => { + log::trace!( + "find_configuration kube_client.request returned kube error: {:?}", + ae + ); + Err(anyhow::anyhow!(ae)) + } + _ => { + log::trace!("find_configuration kube_client.request error: {:?}", e); + Err(anyhow::anyhow!(e)) + } + }, } } - fn default_capacity() -> i32 { 1 } @@ -191,38 +174,29 @@ mod crd_serialization_tests { use super::*; use env_logger; - #[derive(Serialize, Deserialize, Clone, Debug)] - #[serde(rename_all = "camelCase")] - struct ConfigurationCRD { - api_version: String, - kind: String, - metadata: HashMap, - spec: Configuration, - } - #[test] fn test_config_defaults_with_serialization() { let _ = env_logger::builder().is_test(true).try_init(); - if serde_json::from_str::(r#"{}"#).is_ok() { + if serde_json::from_str::(r#"{}"#).is_ok() { panic!("discovery handler is required"); } - serde_json::from_str::( + serde_json::from_str::( r#"{"discoveryHandler":{"name":"random", "discoveryDetails":"serialized details"}}"#, ) .unwrap(); - if serde_json::from_str::(r#"{"discoveryHandler":{"name":"random"}}"#) + if serde_json::from_str::(r#"{"discoveryHandler":{"name":"random"}}"#) .is_err() { panic!("discovery details are not required"); } - if serde_json::from_str::(r#"{"discoveryHandler":{}}"#).is_ok() { + if serde_json::from_str::(r#"{"discoveryHandler":{}}"#).is_ok() { panic!("discovery handler name is required"); } let json = r#"{"discoveryHandler":{"name":"onvif", "discoveryDetails":"{\"onvif\":{}}"}}"#; - let deserialized: 
Configuration = serde_json::from_str(json).unwrap(); + let deserialized: ConfigurationSpec = serde_json::from_str(json).unwrap(); assert_eq!(default_capacity(), deserialized.capacity); assert_eq!(None, deserialized.broker_pod_spec); assert_eq!(None, deserialized.instance_service_spec); @@ -235,7 +209,7 @@ mod crd_serialization_tests { let _ = env_logger::builder().is_test(true).try_init(); let json = r#"{"discoveryHandler":{"name":"random", "discoveryDetails":""}, "capacity":4}"#; - let deserialized: Configuration = serde_json::from_str(json).unwrap(); + let deserialized: ConfigurationSpec = serde_json::from_str(json).unwrap(); assert_eq!(4, deserialized.capacity); assert_eq!(None, deserialized.broker_pod_spec); assert_eq!(None, deserialized.instance_service_spec); @@ -259,9 +233,9 @@ mod crd_serialization_tests { ]; for file in &files { log::trace!("test file: {}", &file); - let yaml = file::read_file_to_string(&file); + let yaml = file::read_file_to_string(file); log::trace!("test file contents: {}", &yaml); - let deserialized: ConfigurationCRD = serde_yaml::from_str(&yaml).unwrap(); + let deserialized: Configuration = serde_yaml::from_str(&yaml).unwrap(); log::trace!("test file deserialized: {:?}", &deserialized); let reserialized = serde_json::to_string(&deserialized).unwrap(); log::trace!("test file reserialized: {:?}", &reserialized); @@ -324,7 +298,7 @@ mod crd_serialization_tests { } } "#; - let deserialized: Configuration = serde_json::from_str(json).unwrap(); + let deserialized: ConfigurationSpec = serde_json::from_str(json).unwrap(); assert_eq!(deserialized.discovery_handler.name, "random".to_string()); assert!(deserialized.discovery_handler.discovery_details.is_empty()); assert_eq!(5, deserialized.capacity); diff --git a/shared/src/akri/instance.rs b/shared/src/akri/instance.rs index 5e133e0f2..3e7caefd1 100644 --- a/shared/src/akri/instance.rs +++ b/shared/src/akri/instance.rs @@ -1,15 +1,14 @@ -use super::{API_INSTANCES, API_NAMESPACE, API_VERSION}; 
+use super::{API_NAMESPACE, API_VERSION}; use kube::{ - api::{ - DeleteParams, ListParams, Object, ObjectList, ObjectMeta, OwnerReference, PatchParams, - PostParams, RawApi, TypeMeta, Void, - }, - client::APIClient, + api::{Api, DeleteParams, ListParams, ObjectList, ObjectMeta, Patch, PatchParams, PostParams}, + Client, CustomResource, }; + +use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference; +use schemars::JsonSchema; use std::collections::HashMap; -pub type KubeAkriInstance = Object; -pub type KubeAkriInstanceList = ObjectList>; +pub type InstanceList = ObjectList; /// Defines the information in the Instance CRD /// @@ -17,9 +16,12 @@ pub type KubeAkriInstanceList = ObjectList>; /// a Configuration. For example, a Configuration /// may describe many cameras, each camera will be represented by a /// Instance. -#[derive(Serialize, Deserialize, Clone, Debug)] +#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] #[serde(rename_all = "camelCase")] -pub struct Instance { +// group = API_NAMESPACE and version = API_VERSION +#[kube(group = "akri.sh", version = "v0", kind = "Instance", namespaced)] +#[kube(apiextensions = "v1")] +pub struct InstanceSpec { /// This contains the name of the corresponding Configuration pub configuration_name: String, @@ -56,35 +58,23 @@ pub struct Instance { /// /// ```no_run /// use akri_shared::akri::instance; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// let instances = instance::get_instances(&api_client).await.unwrap(); /// # } /// ``` -pub async fn get_instances( - kube_client: &APIClient, -) -> Result> { +pub async fn get_instances(kube_client: &Client) -> Result { log::trace!("get_instances enter"); - let akri_instance_type = RawApi::customResource(API_INSTANCES) - 
.group(API_NAMESPACE) - .version(API_VERSION); - - log::trace!("get_instances kube_client.request::(akri_instance_type.list(...)?).await?"); - - let instance_list_params = ListParams { - ..Default::default() - }; - match kube_client - .request::(akri_instance_type.list(&instance_list_params)?) - .await - { - Ok(configs_retrieved) => { + let instances_client: Api = Api::all(kube_client.clone()); + let lp = ListParams::default(); + match instances_client.list(&lp).await { + Ok(instances_retrieved) => { log::trace!("get_instances return"); - Ok(configs_retrieved) + Ok(instances_retrieved) } Err(kube::Error::Api(ae)) => { log::trace!( @@ -106,14 +96,14 @@ pub async fn get_instances( /// /// ```no_run /// use akri_shared::akri::instance; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// let instance = instance::find_instance( -/// "dcc-1", +/// "config-1", /// "default", /// &api_client).await.unwrap(); /// # } @@ -121,25 +111,17 @@ pub async fn get_instances( pub async fn find_instance( name: &str, namespace: &str, - kube_client: &APIClient, -) -> Result { + kube_client: &Client, +) -> Result { log::trace!("find_instance enter"); - let akri_instance_type = RawApi::customResource(API_INSTANCES) - .group(API_NAMESPACE) - .version(API_VERSION) - .within(&namespace); + let instances_client: Api = Api::namespaced(kube_client.clone(), namespace); - log::trace!( - "find_instance kube_client.request::(akri_instance_type.get(...)?).await?" - ); + log::trace!("find_instance getting instance with name {}", name); - match kube_client - .request::(akri_instance_type.get(&name)?) 
- .await - { - Ok(config_retrieved) => { + match instances_client.get(name).await { + Ok(instance_retrieved) => { log::trace!("find_instance return"); - Ok(config_retrieved) + Ok(instance_retrieved) } Err(e) => match e { kube::Error::Api(ae) => { @@ -147,11 +129,11 @@ pub async fn find_instance( "find_instance kube_client.request returned kube error: {:?}", ae ); - Err(kube::Error::Api(ae)) + Err(anyhow::anyhow!(ae)) } _ => { log::trace!("find_instance kube_client.request error: {:?}", e); - Err(e) + Err(anyhow::anyhow!(e)) } }, } @@ -162,16 +144,16 @@ pub async fn find_instance( /// Example: /// /// ```no_run -/// use akri_shared::akri::instance::Instance; +/// use akri_shared::akri::instance::InstanceSpec; /// use akri_shared::akri::instance; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// let instance = instance::create_instance( -/// &Instance { +/// &InstanceSpec { /// configuration_name: "capability_configuration_name".to_string(), /// shared: true, /// nodes: Vec::new(), @@ -186,48 +168,31 @@ pub async fn find_instance( /// # } /// ``` pub async fn create_instance( - instance_to_create: &Instance, + instance_to_create: &InstanceSpec, name: &str, namespace: &str, owner_config_name: &str, owner_config_uid: &str, - kube_client: &APIClient, -) -> Result<(), Box> { + kube_client: &Client, +) -> Result<(), anyhow::Error> { log::trace!("create_instance enter"); - let akri_instance_type = RawApi::customResource(API_INSTANCES) - .group(API_NAMESPACE) - .version(API_VERSION) - .within(&namespace); - - let kube_instance = KubeAkriInstance { - metadata: ObjectMeta { - name: name.to_string(), - ownerReferences: vec![OwnerReference { - apiVersion: format!("{}/{}", API_NAMESPACE, API_VERSION), - kind: "Configuration".to_string(), - controller: true, 
- blockOwnerDeletion: true, - name: owner_config_name.to_string(), - uid: owner_config_uid.to_string(), - }], - ..Default::default() - }, - spec: instance_to_create.clone(), - status: None, - types: TypeMeta { - apiVersion: Some(format!("{}/{}", API_NAMESPACE, API_VERSION)), - kind: Some("Instance".to_string()), - }, + let instances_client: Api = Api::namespaced(kube_client.clone(), namespace); + + let mut instance = Instance::new(name, instance_to_create.clone()); + instance.metadata = ObjectMeta { + name: Some(name.to_string()), + owner_references: Some(vec![OwnerReference { + api_version: format!("{}/{}", API_NAMESPACE, API_VERSION), + kind: "Configuration".to_string(), + controller: Some(true), + block_owner_deletion: Some(true), + name: owner_config_name.to_string(), + uid: owner_config_uid.to_string(), + }]), + ..Default::default() }; - let binary_instance = serde_json::to_vec(&kube_instance)?; - log::trace!("create_instance akri_instance_type.create"); - let instance_create_params = PostParams::default(); - let create_request = akri_instance_type - .create(&instance_create_params, binary_instance) - .expect("failed to create request"); - log::trace!("create_instance kube_client.request::(akri_instance_type.create(...)?).await?"); - match kube_client - .request::(create_request) + match instances_client + .create(&PostParams::default(), &instance) .await { Ok(_instance_created) => { @@ -254,12 +219,12 @@ pub async fn create_instance( /// /// ```no_run /// use akri_shared::akri::instance; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// let instance = instance::delete_instance( /// "instance-1", /// "default", @@ -269,21 +234,13 @@ pub async fn create_instance( pub async fn delete_instance( name: &str, namespace: &str, - kube_client: 
&APIClient, -) -> Result<(), Box> { + kube_client: &Client, +) -> Result<(), anyhow::Error> { log::trace!("delete_instance enter"); - let akri_instance_type = RawApi::customResource(API_INSTANCES) - .group(API_NAMESPACE) - .version(API_VERSION) - .within(&namespace); - - log::trace!("delete_instance akri_instance_type.delete"); + let instances_client: Api = Api::namespaced(kube_client.clone(), namespace); let instance_delete_params = DeleteParams::default(); - let delete_request = akri_instance_type - .delete(name, &instance_delete_params) - .expect("failed to delete request"); - log::trace!("delete_instance kube_client.request::(akri_instance_type.delete(...)?).await?"); - match kube_client.request::(delete_request).await { + log::trace!("delete_instance instances_client.delete(name, &instance_delete_params).await?"); + match instances_client.delete(name, &instance_delete_params).await { Ok(_void_response) => { log::trace!("delete_instance return"); Ok(()) @@ -307,16 +264,16 @@ pub async fn delete_instance( /// Example: /// /// ```no_run -/// use akri_shared::akri::instance::Instance; +/// use akri_shared::akri::instance::InstanceSpec; /// use akri_shared::akri::instance; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// let instance = instance::update_instance( -/// &Instance { +/// &InstanceSpec { /// configuration_name: "capability_configuration_name".to_string(), /// shared: true, /// nodes: Vec::new(), @@ -329,37 +286,22 @@ pub async fn delete_instance( /// # } /// ``` pub async fn update_instance( - instance_to_update: &Instance, + instance_to_update: &InstanceSpec, name: &str, namespace: &str, - kube_client: &APIClient, -) -> Result<(), Box> { + kube_client: &Client, +) -> Result<(), anyhow::Error> { log::trace!("update_instance 
enter"); - let akri_instance_type = RawApi::customResource(API_INSTANCES) - .group(API_NAMESPACE) - .version(API_VERSION) - .within(&namespace); - - let existing_kube_akri_instance_type = find_instance(name, namespace, kube_client).await?; - let modified_kube_instance = KubeAkriInstance { - metadata: existing_kube_akri_instance_type.metadata, - spec: instance_to_update.clone(), - status: existing_kube_akri_instance_type.status, - types: existing_kube_akri_instance_type.types, - }; - log::trace!( - "update_instance wrapped_instance: {:?}", - serde_json::to_string(&modified_kube_instance).unwrap() - ); - let binary_instance = serde_json::to_vec(&modified_kube_instance)?; - - log::trace!("update_instance akri_instance_type.patch"); - let instance_patch_params = PatchParams::default(); - let patch_request = akri_instance_type - .patch(name, &instance_patch_params, binary_instance) - .expect("failed to create request"); - log::trace!("update_instance kube_client.request::(akri_instance_type.patch(...)?).await?"); - match kube_client.request::(patch_request).await { + let instances_client: Api = Api::namespaced(kube_client.clone(), namespace); + let modified_instance = Instance::new(name, instance_to_update.clone()); + match instances_client + .patch( + name, + &PatchParams::default(), + &Patch::Merge(&modified_instance), + ) + .await + { Ok(_instance_modified) => { log::trace!("update_instance return"); Ok(()) @@ -388,22 +330,13 @@ mod crd_serializeation_tests { use super::*; use env_logger; - #[derive(Serialize, Deserialize, Clone, Debug)] - #[serde(rename_all = "camelCase")] - struct InstanceCRD { - api_version: String, - kind: String, - metadata: HashMap, - spec: Instance, - } - #[test] #[should_panic] fn test_instance_no_class_name_failure() { let _ = env_logger::builder().is_test(true).try_init(); let json = r#"{}"#; - let _: Instance = serde_json::from_str(json).unwrap(); + let _: InstanceSpec = serde_json::from_str(json).unwrap(); } #[test] @@ -411,7 +344,7 @@ 
mod crd_serializeation_tests { let _ = env_logger::builder().is_test(true).try_init(); let json = r#"{"configurationName": "foo"}"#; - let deserialized: Instance = serde_json::from_str(json).unwrap(); + let deserialized: InstanceSpec = serde_json::from_str(json).unwrap(); assert_eq!("foo".to_string(), deserialized.configuration_name); assert_eq!(0, deserialized.broker_properties.len()); assert_eq!(default_shared(), deserialized.shared); @@ -430,7 +363,7 @@ mod crd_serializeation_tests { let json = r#" configurationName: foo "#; - let deserialized: Instance = serde_yaml::from_str(json).unwrap(); + let deserialized: InstanceSpec = serde_yaml::from_str(json).unwrap(); assert_eq!("foo".to_string(), deserialized.configuration_name); assert_eq!(0, deserialized.broker_properties.len()); assert_eq!(default_shared(), deserialized.shared); @@ -447,7 +380,7 @@ mod crd_serializeation_tests { let _ = env_logger::builder().is_test(true).try_init(); let json = r#"{"configurationName":"blah","brokerProperties":{"a":"two"},"shared":true,"nodes":["n1","n2"],"deviceUsage":{"0":"","1":"n1"}}"#; - let deserialized: Instance = serde_json::from_str(json).unwrap(); + let deserialized: InstanceSpec = serde_json::from_str(json).unwrap(); assert_eq!("blah".to_string(), deserialized.configuration_name); assert_eq!(1, deserialized.broker_properties.len()); assert_eq!(true, deserialized.shared); @@ -466,8 +399,8 @@ mod crd_serializeation_tests { "../test/yaml/akri-instance-usb-camera.yaml", ]; for file in &files { - let yaml = file::read_file_to_string(&file); - let deserialized: InstanceCRD = serde_yaml::from_str(&yaml).unwrap(); + let yaml = file::read_file_to_string(file); + let deserialized: Instance = serde_yaml::from_str(&yaml).unwrap(); let _ = serde_json::to_string(&deserialized).unwrap(); } } diff --git a/shared/src/akri/mod.rs b/shared/src/akri/mod.rs index f258b4b02..7a8ba22a3 100644 --- a/shared/src/akri/mod.rs +++ b/shared/src/akri/mod.rs @@ -1,5 +1,8 @@ /// Akri API Version pub 
const API_VERSION: &str = "v0"; +/// Version for kube:Api::watch +/// TODO: use kube_runtime::watcher::watcher to avoid managing version +pub const WATCH_VERSION: &str = "0"; /// Akri CRD Namespace pub const API_NAMESPACE: &str = "akri.sh"; /// Akri Configuration CRD name @@ -30,6 +33,6 @@ pub mod retry { pub async fn random_delay() { let random_decimal: f32 = random::(); let random_delay_0_to_200: u64 = (200_f32 * random_decimal) as u64; - time::delay_for(Duration::from_millis(random_delay_0_to_200)).await; + time::sleep(Duration::from_millis(random_delay_0_to_200)).await; } } diff --git a/shared/src/k8s/mod.rs b/shared/src/k8s/mod.rs index b90c56d08..e0a50efce 100644 --- a/shared/src/k8s/mod.rs +++ b/shared/src/k8s/mod.rs @@ -1,21 +1,14 @@ use super::akri::{ configuration, - configuration::{KubeAkriConfig, KubeAkriConfigList}, + configuration::{Configuration, ConfigurationList}, instance, - instance::{Instance, KubeAkriInstance, KubeAkriInstanceList}, + instance::{Instance, InstanceList, InstanceSpec}, retry::{random_delay, MAX_INSTANCE_UPDATE_TRIES}, API_NAMESPACE, API_VERSION, }; use async_trait::async_trait; -use futures::executor::block_on; -use k8s_openapi::api::core::v1::{ - NodeSpec, NodeStatus, Pod, PodSpec, PodStatus, Service, ServiceSpec, ServiceStatus, -}; -use kube::{ - api::{Object, ObjectList}, - client::APIClient, - config, -}; +use k8s_openapi::api::core::v1::{Node, Pod, Service}; +use kube::{api::ObjectList, client::Client}; use mockall::{automock, predicate::*}; pub mod node; @@ -75,12 +68,12 @@ impl OwnershipInfo { .to_string() } - pub fn get_controller(&self) -> bool { - true + pub fn get_controller(&self) -> Option { + Some(true) } - pub fn get_block_owner_deletion(&self) -> bool { - true + pub fn get_block_owner_deletion(&self) -> Option { + Some(true) } pub fn get_name(&self) -> String { @@ -95,133 +88,78 @@ impl OwnershipInfo { #[automock] #[async_trait] pub trait KubeInterface: Send + Sync { - fn get_kube_client(&self) -> APIClient; + fn 
get_kube_client(&self) -> Client; - async fn find_node( - &self, - name: &str, - ) -> Result, Box>; + async fn find_node(&self, name: &str) -> Result; - async fn find_pods_with_label( - &self, - selector: &str, - ) -> Result< - ObjectList>, - Box, - >; - async fn find_pods_with_field( - &self, - selector: &str, - ) -> Result< - ObjectList>, - Box, - >; - async fn create_pod( - &self, - pod_to_create: &Pod, - namespace: &str, - ) -> Result<(), Box>; - async fn remove_pod( - &self, - pod_to_remove: &str, - namespace: &str, - ) -> Result<(), Box>; + async fn find_pods_with_label(&self, selector: &str) -> Result, anyhow::Error>; + async fn find_pods_with_field(&self, selector: &str) -> Result, anyhow::Error>; + async fn create_pod(&self, pod_to_create: &Pod, namespace: &str) -> Result<(), anyhow::Error>; + async fn remove_pod(&self, pod_to_remove: &str, namespace: &str) -> Result<(), anyhow::Error>; - async fn find_services( - &self, - selector: &str, - ) -> Result< - ObjectList>, - Box, - >; + async fn find_services(&self, selector: &str) -> Result, anyhow::Error>; async fn create_service( &self, svc_to_create: &Service, namespace: &str, - ) -> Result<(), Box>; + ) -> Result<(), anyhow::Error>; async fn remove_service( &self, svc_to_remove: &str, namespace: &str, - ) -> Result<(), Box>; + ) -> Result<(), anyhow::Error>; async fn update_service( &self, - svc_to_update: &Object, + svc_to_update: &Service, name: &str, namespace: &str, - ) -> Result<(), Box>; + ) -> Result<(), anyhow::Error>; async fn find_configuration( &self, name: &str, namespace: &str, - ) -> Result>; - async fn get_configurations( - &self, - ) -> Result>; + ) -> Result; + async fn get_configurations(&self) -> Result; - async fn find_instance( - &self, - name: &str, - namespace: &str, - ) -> Result; - async fn get_instances( - &self, - ) -> Result>; + async fn find_instance(&self, name: &str, namespace: &str) -> Result; + async fn get_instances(&self) -> Result; async fn create_instance( &self, - 
instance_to_create: &Instance, + instance_to_create: &InstanceSpec, name: &str, namespace: &str, owner_config_name: &str, owner_config_uid: &str, - ) -> Result<(), Box>; - async fn delete_instance( - &self, - name: &str, - namespace: &str, - ) -> Result<(), Box>; + ) -> Result<(), anyhow::Error>; + async fn delete_instance(&self, name: &str, namespace: &str) -> Result<(), anyhow::Error>; async fn update_instance( &self, - instance_to_update: &Instance, + instance_to_update: &InstanceSpec, name: &str, namespace: &str, - ) -> Result<(), Box>; -} - -/// Create new KubeInetrace implementation -pub fn create_kube_interface() -> impl KubeInterface { - KubeImpl::new() + ) -> Result<(), anyhow::Error>; } #[derive(Clone)] -struct KubeImpl { - kube_configuration: kube::config::Configuration, +pub struct KubeImpl { + client: kube::Client, } impl KubeImpl { /// Create new instance of KubeImpl - fn new() -> Self { - KubeImpl { - kube_configuration: match std::env::var("KUBERNETES_PORT") { - Ok(_val) => { - log::trace!("Loading in-cluster config"); - config::incluster_config().unwrap() // pub fn incluster_config() -> Result { - } - Err(_e) => { - log::trace!("Loading config file"); - block_on(config::load_kube_config()).unwrap() // pub async fn load_kube_config() -> Result - } - }, - } + pub async fn new() -> Result { + Ok(KubeImpl { + client: Client::try_default().await?, + }) } } #[async_trait] impl KubeInterface for KubeImpl { - /// Create new APIClient using KubeImpl's kube::config::Configuration - fn get_kube_client(&self) -> APIClient { - APIClient::new(self.kube_configuration.clone()) + /// Return of clone of KubeImpl's client + fn get_kube_client(&self) -> Client { + self.client.clone() } /// Get Kuberenetes node for specified name @@ -234,15 +172,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let node = 
kube.find_node("node-a").await.unwrap(); /// # } /// ``` - async fn find_node( - &self, - name: &str, - ) -> Result, Box> - { + async fn find_node(&self, name: &str) -> Result { node::find_node(name, self.get_kube_client()).await } @@ -256,17 +190,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let interesting_pods = kube.find_pods_with_label("label=interesting").await.unwrap(); /// # } /// ``` - async fn find_pods_with_label( - &self, - selector: &str, - ) -> Result< - ObjectList>, - Box, - > { + async fn find_pods_with_label(&self, selector: &str) -> Result, anyhow::Error> { pod::find_pods_with_selector(Some(selector.to_string()), None, self.get_kube_client()).await } /// Get Kuberenetes pods with specified field selector @@ -279,17 +207,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let pods_on_node_a = kube.find_pods_with_field("spec.nodeName=node-a").await.unwrap(); /// # } /// ``` - async fn find_pods_with_field( - &self, - selector: &str, - ) -> Result< - ObjectList>, - Box, - > { + async fn find_pods_with_field(&self, selector: &str) -> Result, anyhow::Error> { pod::find_pods_with_selector(None, Some(selector.to_string()), self.get_kube_client()).await } /// Create Kuberenetes pod @@ -303,15 +225,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// kube.create_pod(&Pod::default(), "pod_namespace").await.unwrap(); /// # } /// ``` - async fn create_pod( - &self, - pod_to_create: &Pod, - namespace: &str, - ) -> Result<(), Box> { + async fn create_pod(&self, pod_to_create: &Pod, namespace: &str) -> Result<(), anyhow::Error> { 
pod::create_pod(pod_to_create, namespace, self.get_kube_client()).await } /// Remove Kubernetes pod @@ -324,15 +242,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// kube.remove_pod("pod_to_remove", "pod_namespace").await.unwrap(); /// # } /// ``` - async fn remove_pod( - &self, - pod_to_remove: &str, - namespace: &str, - ) -> Result<(), Box> { + async fn remove_pod(&self, pod_to_remove: &str, namespace: &str) -> Result<(), anyhow::Error> { pod::remove_pod(pod_to_remove, namespace, self.get_kube_client()).await } @@ -346,17 +260,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let interesting_services = kube.find_services("label=interesting").await.unwrap(); /// # } /// ``` - async fn find_services( - &self, - selector: &str, - ) -> Result< - ObjectList>, - Box, - > { + async fn find_services(&self, selector: &str) -> Result, anyhow::Error> { service::find_services_with_selector(selector, self.get_kube_client()).await } /// Create Kubernetes service @@ -370,7 +278,7 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// kube.create_service(&Service::default(), "service_namespace").await.unwrap(); /// # } /// ``` @@ -378,7 +286,7 @@ impl KubeInterface for KubeImpl { &self, svc_to_create: &Service, namespace: &str, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { service::create_service(svc_to_create, namespace, self.get_kube_client()).await } /// Remove Kubernetes service @@ -391,7 +299,7 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = 
k8s::KubeImpl::new().await.unwrap(); /// kube.remove_service("service_to_remove", "service_namespace").await.unwrap(); /// # } /// ``` @@ -399,7 +307,7 @@ impl KubeInterface for KubeImpl { &self, svc_to_remove: &str, namespace: &str, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { service::remove_service(svc_to_remove, namespace, self.get_kube_client()).await } /// Update Kubernetes service @@ -413,10 +321,10 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let selector = "environment=production,app=nginx"; /// for svc in kube.find_services(&selector).await.unwrap() { - /// let svc_name = &svc.metadata.name.clone(); + /// let svc_name = &svc.metadata.name.clone().unwrap(); /// let svc_namespace = &svc.metadata.namespace.as_ref().unwrap().clone(); /// let updated_svc = kube.update_service( /// &svc, @@ -427,10 +335,10 @@ impl KubeInterface for KubeImpl { /// ``` async fn update_service( &self, - svc_to_update: &Object, + svc_to_update: &Service, name: &str, namespace: &str, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { service::update_service(svc_to_update, name, namespace, self.get_kube_client()).await } @@ -444,15 +352,15 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); - /// let dcc = kube.find_configuration("dcc-1", "dcc-namespace").await.unwrap(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); + /// let config = kube.find_configuration("config-1", "config-namespace").await.unwrap(); /// # } /// ``` async fn find_configuration( &self, name: &str, namespace: &str, - ) -> Result> { + ) -> Result { configuration::find_configuration(name, namespace, &self.get_kube_client()).await } // Get Akri Configurations with given namespace @@ -465,13 +373,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// 
# async fn main() { - /// let kube = k8s::create_kube_interface(); - /// let dccs = kube.get_configurations().await.unwrap(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); + /// let configs = kube.get_configurations().await.unwrap(); /// # } /// ``` - async fn get_configurations( - &self, - ) -> Result> { + async fn get_configurations(&self) -> Result { configuration::get_configurations(&self.get_kube_client()).await } @@ -485,15 +391,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let instance = kube.find_instance("instance-1", "instance-namespace").await.unwrap(); /// # } /// ``` - async fn find_instance( - &self, - name: &str, - namespace: &str, - ) -> Result { + async fn find_instance(&self, name: &str, namespace: &str) -> Result { instance::find_instance(name, namespace, &self.get_kube_client()).await } // Get Akri Instances with given namespace @@ -506,13 +408,11 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// let instances = kube.get_instances().await.unwrap(); /// # } /// ``` - async fn get_instances( - &self, - ) -> Result> { + async fn get_instances(&self) -> Result { instance::get_instances(&self.get_kube_client()).await } /// Create Akri Instance @@ -522,13 +422,13 @@ impl KubeInterface for KubeImpl { /// ```no_run /// use akri_shared::k8s; /// use akri_shared::k8s::KubeInterface; - /// use akri_shared::akri::instance::Instance; + /// use akri_shared::akri::instance::InstanceSpec; /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// kube.create_instance( - /// &Instance{ + /// &InstanceSpec{ /// configuration_name: "capability_configuration_name".to_string(), /// 
shared: true, /// nodes: Vec::new(), @@ -544,12 +444,12 @@ impl KubeInterface for KubeImpl { /// ``` async fn create_instance( &self, - instance_to_create: &Instance, + instance_to_create: &InstanceSpec, name: &str, namespace: &str, owner_config_name: &str, owner_config_uid: &str, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { instance::create_instance( instance_to_create, name, @@ -570,18 +470,14 @@ impl KubeInterface for KubeImpl { /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// kube.delete_instance( /// "instance-1", /// "instance-namespace" /// ).await.unwrap(); /// # } /// ``` - async fn delete_instance( - &self, - name: &str, - namespace: &str, - ) -> Result<(), Box> { + async fn delete_instance(&self, name: &str, namespace: &str) -> Result<(), anyhow::Error> { instance::delete_instance(name, namespace, &self.get_kube_client()).await } /// Update Akri Instance @@ -591,13 +487,13 @@ impl KubeInterface for KubeImpl { /// ```no_run /// use akri_shared::k8s; /// use akri_shared::k8s::KubeInterface; - /// use akri_shared::akri::instance::Instance; + /// use akri_shared::akri::instance::InstanceSpec; /// /// # #[tokio::main] /// # async fn main() { - /// let kube = k8s::create_kube_interface(); + /// let kube = k8s::KubeImpl::new().await.unwrap(); /// kube.update_instance( - /// &Instance{ + /// &InstanceSpec{ /// configuration_name: "capability_configuration_name".to_string(), /// shared: true, /// nodes: Vec::new(), @@ -611,10 +507,10 @@ impl KubeInterface for KubeImpl { /// ``` async fn update_instance( &self, - instance_to_update: &Instance, + instance_to_update: &InstanceSpec, name: &str, namespace: &str, - ) -> Result<(), Box> { + ) -> Result<(), anyhow::Error> { instance::update_instance(instance_to_update, name, namespace, &self.get_kube_client()) .await } @@ -626,10 +522,10 @@ pub async fn try_delete_instance( kube_interface: &dyn 
KubeInterface, instance_name: &str, instance_namespace: &str, -) -> Result<(), Box> { +) -> Result<(), anyhow::Error> { for x in 0..MAX_INSTANCE_UPDATE_TRIES { match kube_interface - .delete_instance(instance_name, &instance_namespace) + .delete_instance(instance_name, instance_namespace) .await { Ok(()) => { @@ -639,21 +535,20 @@ pub async fn try_delete_instance( Err(e) => { // Check if already was deleted else return error match kube_interface - .find_instance(&instance_name, &instance_namespace) + .find_instance(instance_name, instance_namespace) .await { - Err(kube::Error::Api(ae)) => { - if ae.code == ERROR_NOT_FOUND { - log::trace!( - "try_delete_instance - discovered Instance {} already deleted", - instance_name - ); - break; - } - log::error!("try_delete_instance - when looking up Instance {}, got kube API error: {:?}", instance_name, ae); - } Err(e) => { - log::error!("try_delete_instance - when looking up Instance {}, got kube error: {:?}. {} retries left.", instance_name, e, MAX_INSTANCE_UPDATE_TRIES - x - 1); + if let Some(kube::Error::Api(ae)) = e.downcast_ref::() { + if ae.code == ERROR_NOT_FOUND { + log::trace!( + "try_delete_instance - discovered Instance {} already deleted", + instance_name + ); + break; + } + log::error!("try_delete_instance - when looking up Instance {}, got kube API error: {:?}", instance_name, ae); + } } Ok(_) => { log::error!( @@ -690,8 +585,8 @@ pub mod test_ownership { ownership.get_api_version() ); assert_eq!("Configuration", &ownership.get_kind()); - assert_eq!(true, ownership.get_controller()); - assert_eq!(true, ownership.get_block_owner_deletion()); + assert_eq!(true, ownership.get_controller().unwrap()); + assert_eq!(true, ownership.get_block_owner_deletion().unwrap()); assert_eq!(name, &ownership.get_name()); assert_eq!(uid, &ownership.get_uid()); } @@ -706,8 +601,8 @@ pub mod test_ownership { ownership.get_api_version() ); assert_eq!("Instance", &ownership.get_kind()); - assert_eq!(true, ownership.get_controller()); 
- assert_eq!(true, ownership.get_block_owner_deletion()); + assert_eq!(true, ownership.get_controller().unwrap()); + assert_eq!(true, ownership.get_block_owner_deletion().unwrap()); assert_eq!(name, &ownership.get_name()); assert_eq!(uid, &ownership.get_uid()); } @@ -718,8 +613,8 @@ pub mod test_ownership { let ownership = OwnershipInfo::new(OwnershipType::Pod, name.to_string(), uid.to_string()); assert_eq!("core/v1", ownership.get_api_version()); assert_eq!("Pod", &ownership.get_kind()); - assert_eq!(true, ownership.get_controller()); - assert_eq!(true, ownership.get_block_owner_deletion()); + assert_eq!(true, ownership.get_controller().unwrap()); + assert_eq!(true, ownership.get_block_owner_deletion().unwrap()); assert_eq!(name, &ownership.get_name()); assert_eq!(uid, &ownership.get_uid()); } @@ -731,8 +626,8 @@ pub mod test_ownership { OwnershipInfo::new(OwnershipType::Service, name.to_string(), uid.to_string()); assert_eq!("core/v1", ownership.get_api_version()); assert_eq!("Service", &ownership.get_kind()); - assert_eq!(true, ownership.get_controller()); - assert_eq!(true, ownership.get_block_owner_deletion()); + assert_eq!(true, ownership.get_controller().unwrap()); + assert_eq!(true, ownership.get_block_owner_deletion().unwrap()); assert_eq!(name, &ownership.get_name()); assert_eq!(uid, &ownership.get_uid()); } diff --git a/shared/src/k8s/node.rs b/shared/src/k8s/node.rs index 0443fb2e6..4f7f518fb 100644 --- a/shared/src/k8s/node.rs +++ b/shared/src/k8s/node.rs @@ -1,8 +1,5 @@ -use k8s_openapi::api::core::v1::{NodeSpec, NodeStatus}; -use kube::{ - api::{Api, Object}, - client::APIClient, -}; +use k8s_openapi::api::core::v1::Node; +use kube::{api::Api, client::Client}; use log::trace; /// Get Kubernetes Node with a given name @@ -11,24 +8,21 @@ use log::trace; /// /// ```no_run /// use akri_shared::k8s::node; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { /// let 
label_selector = Some("environment=production,app=nginx".to_string()); -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// let node = node::find_node("node-a", api_client).await.unwrap(); /// # } /// ``` -pub async fn find_node( - name: &str, - kube_client: APIClient, -) -> Result, Box> { - trace!("find_node with name={:?}", &name); - let nodes = Api::v1Node(kube_client); +pub async fn find_node(name: &str, kube_client: Client) -> Result { + trace!("find_node with name={}", name); + let nodes: Api = Api::all(kube_client); trace!("find_node PRE nodes.get(...).await?"); - let result = nodes.get(&name).await; + let result = nodes.get(name).await; trace!("find_node return"); Ok(result?) } diff --git a/shared/src/k8s/pod.rs b/shared/src/k8s/pod.rs index 51f1a3f93..69ebc3916 100644 --- a/shared/src/k8s/pod.rs +++ b/shared/src/k8s/pod.rs @@ -5,13 +5,13 @@ use super::{ use either::Either; use k8s_openapi::api::core::v1::{ Affinity, NodeAffinity, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, Pod, PodSpec, - PodStatus, ResourceRequirements, + ResourceRequirements, }; use k8s_openapi::apimachinery::pkg::api::resource::Quantity; use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ObjectMeta, OwnerReference}; use kube::{ - api::{Api, DeleteParams, ListParams, Object, ObjectList, PostParams}, - client::APIClient, + api::{Api, DeleteParams, ListParams, ObjectList, PostParams}, + client::Client, }; use log::{error, info, trace}; use std::collections::BTreeMap; @@ -28,47 +28,44 @@ pub const AKRI_TARGET_NODE_LABEL_NAME: &str = "akri.sh/target-node"; /// /// ```no_run /// use akri_shared::k8s::pod; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { /// let label_selector = Some("environment=production,app=nginx".to_string()); -/// let api_client = APIClient::new(config::incluster_config().unwrap()); 
+/// let api_client = Client::try_default().await.unwrap(); /// for pod in pod::find_pods_with_selector(label_selector, None, api_client).await.unwrap() { -/// println!("found pod: {}", pod.metadata.name) +/// println!("found pod: {}", pod.metadata.name.unwrap()) /// } /// # } /// ``` /// /// ```no_run /// use akri_shared::k8s::pod; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { /// let field_selector = Some("spec.nodeName=node-a".to_string()); -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// for pod in pod::find_pods_with_selector(None, field_selector, api_client).await.unwrap() { -/// println!("found pod: {}", pod.metadata.name) +/// println!("found pod: {}", pod.metadata.name.unwrap()) /// } /// # } /// ``` pub async fn find_pods_with_selector( label_selector: Option, field_selector: Option, - kube_client: APIClient, -) -> Result< - ObjectList>, - Box, -> { + kube_client: Client, +) -> Result, anyhow::Error> { trace!( "find_pods_with_selector with label_selector={:?} field_selector={:?}", &label_selector, &field_selector ); - let pods = Api::v1Pod(kube_client); + let pods: Api = Api::all(kube_client); let pod_list_params = ListParams { label_selector, field_selector, @@ -127,11 +124,13 @@ type ResourceQuantityType = BTreeMap; /// OwnershipType, /// pod /// }; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// use k8s_openapi::api::core::v1::PodSpec; /// -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// # #[tokio::main] +/// # async fn main() { +/// let api_client = Client::try_default().await.unwrap(); /// let svc = pod::create_new_pod_from_spec( /// "pod_namespace", /// "capability_instance", @@ -145,6 +144,7 @@ type ResourceQuantityType = BTreeMap; /// "node-a", /// true, /// &PodSpec::default()).unwrap(); +/// # } 
/// ``` pub fn create_new_pod_from_spec( pod_namespace: &str, @@ -183,51 +183,45 @@ pub fn create_new_pod_from_spec( let owner_references: Vec = vec![OwnerReference { api_version: ownership.get_api_version(), kind: ownership.get_kind(), - controller: Some(ownership.get_controller()), - block_owner_deletion: Some(ownership.get_block_owner_deletion()), + controller: ownership.get_controller(), + block_owner_deletion: ownership.get_block_owner_deletion(), name: ownership.get_name(), uid: ownership.get_uid(), }]; let mut modified_pod_spec = pod_spec.clone(); - for container in &mut modified_pod_spec.containers { - let mut incoming_limits: Option = None; - let mut incoming_requests: Option = None; + let insert_akri_resources = |map: &mut ResourceQuantityType| { + if map.contains_key(RESOURCE_REQUIREMENTS_KEY) { + let placeholder_value = map.get(RESOURCE_REQUIREMENTS_KEY).unwrap().clone(); + map.insert(resource_limit_name.to_string(), placeholder_value); + map.remove(RESOURCE_REQUIREMENTS_KEY); + } + }; + for container in &mut modified_pod_spec.containers { if let Some(resources) = container.resources.as_ref() { - if let Some(limits) = resources.limits.as_ref() { - let mut modified_limits = limits.clone(); - if modified_limits.contains_key(RESOURCE_REQUIREMENTS_KEY) { - let placeholder_value = modified_limits - .get(RESOURCE_REQUIREMENTS_KEY) - .unwrap() - .clone(); - modified_limits.insert(resource_limit_name.to_string(), placeholder_value); - modified_limits.remove(RESOURCE_REQUIREMENTS_KEY); - } - - incoming_limits = Some(modified_limits); - } - if let Some(requests) = resources.requests.as_ref() { - let mut modified_requests = requests.clone(); - if modified_requests.contains_key(RESOURCE_REQUIREMENTS_KEY) { - let placeholder_value = modified_requests - .get(RESOURCE_REQUIREMENTS_KEY) - .unwrap() - .clone(); - modified_requests.insert(resource_limit_name.to_string(), placeholder_value); - modified_requests.remove(RESOURCE_REQUIREMENTS_KEY); - } - - incoming_requests 
= Some(modified_requests); - } + container.resources = Some(ResourceRequirements { + limits: { + match resources.limits.clone() { + Some(mut map) => { + insert_akri_resources(&mut map); + Some(map) + } + None => None, + } + }, + requests: { + match resources.requests.clone() { + Some(mut map) => { + insert_akri_resources(&mut map); + Some(map) + } + None => None, + } + }, + }); }; - - container.resources = Some(ResourceRequirements { - limits: incoming_limits, - requests: incoming_requests, - }); } // Ensure that the modified PodSpec has the required Affinity settings @@ -254,13 +248,13 @@ pub fn create_new_pod_from_spec( let result = Pod { spec: Some(modified_pod_spec), - metadata: Some(ObjectMeta { + metadata: ObjectMeta { name: Some(app_name), namespace: Some(pod_namespace.to_string()), labels: Some(labels), owner_references: Some(owner_references), ..Default::default() - }), + }, ..Default::default() }; @@ -348,7 +342,7 @@ mod broker_podspec_tests { do_pod_spec_creation_test( vec![image.clone()], vec![Container { - image: Some(image.clone()), + image: Some(image), resources: Some(ResourceRequirements { limits: Some(placeholder_limits), requests: Some(placeholder_requests), @@ -448,18 +442,14 @@ mod broker_podspec_tests { ); // Validate the metadata name/namesapce - assert_eq!(&app_name, &pod.metadata.clone().unwrap().name.unwrap()); - assert_eq!( - &pod_namespace, - &pod.metadata.clone().unwrap().namespace.unwrap() - ); + assert_eq!(&app_name, &pod.metadata.clone().name.unwrap()); + assert_eq!(&pod_namespace, &pod.metadata.clone().namespace.unwrap()); // Validate the labels added assert_eq!( &&app_name, &pod.metadata .clone() - .unwrap() .labels .unwrap() .get(APP_LABEL_ID) @@ -469,7 +459,6 @@ mod broker_podspec_tests { &&API_NAMESPACE.to_string(), &pod.metadata .clone() - .unwrap() .labels .unwrap() .get(CONTROLLER_LABEL_ID) @@ -479,7 +468,6 @@ mod broker_podspec_tests { &&configuration_name, &pod.metadata .clone() - .unwrap() .labels .unwrap() 
.get(AKRI_CONFIGURATION_LABEL_NAME) @@ -489,7 +477,6 @@ mod broker_podspec_tests { &&instance_name, &pod.metadata .clone() - .unwrap() .labels .unwrap() .get(AKRI_INSTANCE_LABEL_NAME) @@ -499,7 +486,6 @@ mod broker_podspec_tests { &&node_to_run_pod_on, &pod.metadata .clone() - .unwrap() .labels .unwrap() .get(AKRI_TARGET_NODE_LABEL_NAME) @@ -511,7 +497,6 @@ mod broker_podspec_tests { instance_name, pod.metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -522,7 +507,6 @@ mod broker_podspec_tests { instance_uid, pod.metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -533,7 +517,6 @@ mod broker_podspec_tests { "Instance", &pod.metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -544,7 +527,6 @@ mod broker_podspec_tests { &format!("{}/{}", API_NAMESPACE, API_VERSION), &pod.metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -554,7 +536,6 @@ mod broker_podspec_tests { assert!(pod .metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -564,7 +545,6 @@ mod broker_podspec_tests { assert!(pod .metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -846,26 +826,25 @@ mod broker_podspec_tests { /// /// ```no_run /// use akri_shared::k8s::pod; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// use k8s_openapi::api::core::v1::Pod; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// pod::create_pod(&Pod::default(), "pod_namespace", api_client).await.unwrap(); /// # } /// ``` pub async fn create_pod( pod_to_create: &Pod, namespace: &str, - kube_client: APIClient, -) -> Result<(), Box> { + kube_client: Client, +) -> Result<(), anyhow::Error> { trace!("create_pod enter"); - let pods = Api::v1Pod(kube_client.clone()).within(&namespace); - let pod_as_u8 = serde_json::to_vec(&pod_to_create)?; + let pods: Api = 
Api::namespaced(kube_client, namespace); info!("create_pod pods.create(...).await?:"); - match pods.create(&PostParams::default(), pod_as_u8).await { + match pods.create(&PostParams::default(), pod_to_create).await { Ok(created_pod) => { info!( "create_pod pods.create return: {:?}", @@ -883,7 +862,7 @@ pub async fn create_pod( serde_json::to_string(&pod_to_create), ae ); - Err(ae.into()) + Err(anyhow::anyhow!(ae)) } } Err(e) => { @@ -892,7 +871,7 @@ pub async fn create_pod( serde_json::to_string(&pod_to_create), e ); - Err(e.into()) + Err(anyhow::anyhow!(e)) } } } @@ -903,22 +882,22 @@ pub async fn create_pod( /// /// ```no_run /// use akri_shared::k8s::pod; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// pod::remove_pod("pod_to_remove", "pod_namespace", api_client).await.unwrap(); /// # } /// ``` pub async fn remove_pod( pod_to_remove: &str, namespace: &str, - kube_client: APIClient, -) -> Result<(), Box> { + kube_client: Client, +) -> Result<(), anyhow::Error> { trace!("remove_pod enter"); - let pods = Api::v1Pod(kube_client.clone()).within(&namespace); + let pods: Api = Api::namespaced(kube_client, namespace); info!("remove_pod pods.delete(...).await?:"); match pods.delete(pod_to_remove, &DeleteParams::default()).await { Ok(deleted_pod) => match deleted_pod { @@ -940,7 +919,7 @@ pub async fn remove_pod( "remove_pod pods.delete [{:?}] returned kube error: {:?}", &pod_to_remove, ae ); - Err(ae.into()) + Err(anyhow::anyhow!(ae)) } } Err(e) => { @@ -948,7 +927,7 @@ pub async fn remove_pod( "remove_pod pods.delete [{:?}] error: {:?}", &pod_to_remove, e ); - Err(e.into()) + Err(anyhow::anyhow!(e)) } } } diff --git a/shared/src/k8s/service.rs b/shared/src/k8s/service.rs index 7f9c3727d..f9cafc9bc 100644 --- a/shared/src/k8s/service.rs +++ 
b/shared/src/k8s/service.rs @@ -6,16 +6,11 @@ use super::{ OwnershipInfo, ERROR_NOT_FOUND, }; use either::Either; -use k8s_openapi::api::core::v1::{Service, ServiceSpec, ServiceStatus}; -use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ - ObjectMeta, OwnerReference as K8sOwnerReference, -}; +use k8s_openapi::api::core::v1::{Service, ServiceSpec}; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ObjectMeta, OwnerReference}; use kube::{ - api::{ - Api, DeleteParams, ListParams, Object, ObjectList, OwnerReference as KubeOwnerReference, - PatchParams, PostParams, - }, - client::APIClient, + api::{Api, DeleteParams, ListParams, ObjectList, Patch, PatchParams, PostParams}, + client::Client, }; use log::{error, info, trace}; use std::collections::BTreeMap; @@ -26,33 +21,30 @@ use std::collections::BTreeMap; /// /// ```no_run /// use akri_shared::k8s::service; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { /// let selector = "environment=production,app=nginx"; -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// for svc in service::find_services_with_selector(&selector, api_client).await.unwrap() { -/// println!("found svc: {}", svc.metadata.name) +/// println!("found svc: {}", svc.metadata.name.unwrap()) /// } /// # } /// ``` pub async fn find_services_with_selector( selector: &str, - kube_client: APIClient, -) -> Result< - ObjectList>, - Box, -> { + kube_client: Client, +) -> Result, anyhow::Error> { trace!("find_services_with_selector with selector={:?}", &selector); - let svcs = Api::v1Service(kube_client); + let svc_client: Api = Api::all(kube_client); let svc_list_params = ListParams { label_selector: Some(selector.to_string()), ..Default::default() }; trace!("find_services_with_selector PRE svcs.list(...).await?"); - let result = svcs.list(&svc_list_params).await; + let result = 
svc_client.list(&svc_list_params).await; trace!("find_services_with_selector return"); Ok(result?) } @@ -97,11 +89,13 @@ pub fn create_service_app_name( /// OwnershipType, /// service /// }; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// use k8s_openapi::api::core::v1::ServiceSpec; /// -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// # #[tokio::main] +/// # async fn main() { +/// let api_client = Client::try_default().await.unwrap(); /// let svc = service::create_new_service_from_spec( /// "svc_namespace", /// "capability_instance", @@ -113,6 +107,7 @@ pub fn create_service_app_name( /// ), /// &ServiceSpec::default(), /// true).unwrap(); +/// # } /// ``` pub fn create_new_service_from_spec( svc_namespace: &str, @@ -122,12 +117,8 @@ pub fn create_new_service_from_spec( svc_spec: &ServiceSpec, node_specific_svc: bool, ) -> Result> { - let app_name = create_service_app_name( - &configuration_name, - &instance_name, - &"svc".to_string(), - node_specific_svc, - ); + let app_name = + create_service_app_name(configuration_name, instance_name, "svc", node_specific_svc); let mut labels: BTreeMap = BTreeMap::new(); labels.insert(APP_LABEL_ID.to_string(), app_name.clone()); labels.insert(CONTROLLER_LABEL_ID.to_string(), API_NAMESPACE.to_string()); @@ -143,25 +134,17 @@ pub fn create_new_service_from_spec( ); } - let owner_references: Vec = vec![K8sOwnerReference { + let owner_references: Vec = vec![OwnerReference { api_version: ownership.get_api_version(), kind: ownership.get_kind(), - controller: Some(ownership.get_controller()), - block_owner_deletion: Some(ownership.get_block_owner_deletion()), + controller: ownership.get_controller(), + block_owner_deletion: ownership.get_block_owner_deletion(), name: ownership.get_name(), uid: ownership.get_uid(), }]; let mut spec = svc_spec.clone(); - let mut modified_selector: BTreeMap; - match spec.selector { - Some(selector) => { - modified_selector = 
selector; - } - None => { - modified_selector = BTreeMap::new(); - } - } + let mut modified_selector: BTreeMap = spec.selector.unwrap_or_default(); modified_selector.insert(CONTROLLER_LABEL_ID.to_string(), API_NAMESPACE.to_string()); if node_specific_svc { modified_selector.insert( @@ -178,13 +161,13 @@ pub fn create_new_service_from_spec( let new_svc = Service { spec: Some(spec), - metadata: Some(ObjectMeta { + metadata: ObjectMeta { name: Some(app_name), namespace: Some(svc_namespace.to_string()), labels: Some(labels), owner_references: Some(owner_references), ..Default::default() - }), + }, ..Default::default() }; @@ -201,13 +184,13 @@ pub fn create_new_service_from_spec( /// OwnershipType, /// service /// }; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { /// let selector = "environment=production,app=nginx"; -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// for svc in service::find_services_with_selector(&selector, api_client).await.unwrap() { /// let mut svc = svc; /// service::update_ownership( @@ -223,39 +206,37 @@ pub fn create_new_service_from_spec( /// # } /// ``` pub fn update_ownership( - svc_to_update: &mut Object, + svc_to_update: &mut Service, ownership: OwnershipInfo, replace_references: bool, ) -> Result<(), Box> { - if replace_references { + let ownership_ref = OwnerReference { + api_version: ownership.get_api_version(), + kind: ownership.get_kind(), + controller: ownership.get_controller(), + block_owner_deletion: ownership.get_block_owner_deletion(), + name: ownership.get_name(), + uid: ownership.get_uid(), + }; + if replace_references || svc_to_update.metadata.owner_references.is_none() { // Replace all existing ownerReferences with specified ownership - svc_to_update.metadata.ownerReferences = vec![KubeOwnerReference { - apiVersion: ownership.get_api_version(), - kind: 
ownership.get_kind(), - controller: ownership.get_controller(), - blockOwnerDeletion: ownership.get_block_owner_deletion(), - name: ownership.get_name(), - uid: ownership.get_uid(), - }]; + svc_to_update.metadata.owner_references = Some(vec![ownership_ref]); } else { // Add ownership to list IFF the UID doesn't already exist if !svc_to_update .metadata - .ownerReferences + .owner_references + .as_ref() + .unwrap() .iter() .any(|x| x.uid == ownership.get_uid()) { svc_to_update .metadata - .ownerReferences - .push(KubeOwnerReference { - apiVersion: ownership.get_api_version(), - kind: ownership.get_kind(), - controller: ownership.get_controller(), - blockOwnerDeletion: ownership.get_block_owner_deletion(), - name: ownership.get_name(), - uid: ownership.get_uid(), - }); + .owner_references + .as_mut() + .unwrap() + .push(ownership_ref); } } Ok(()) @@ -267,8 +248,8 @@ mod svcspec_tests { use super::*; use env_logger; - use kube::api::{Object, ObjectMeta, TypeMeta}; - pub type TestServiceObject = Object; + use k8s_openapi::api::core::v1::ServiceStatus; + use kube::api::ObjectMeta; #[test] fn test_create_service_app_name() { @@ -317,18 +298,13 @@ mod svcspec_tests { fn test_update_ownership_replace() { let _ = env_logger::builder().is_test(true).try_init(); - let svc = TestServiceObject { + let mut svc = Service { metadata: ObjectMeta::default(), - spec: ServiceSpec::default(), + spec: Some(ServiceSpec::default()), status: Some(ServiceStatus::default()), - types: TypeMeta { - apiVersion: None, - kind: None, - }, }; - assert_eq!(0, svc.metadata.ownerReferences.len()); - let mut svc = svc; + assert!(svc.metadata.owner_references.is_none()); update_ownership( &mut svc, OwnershipInfo { @@ -339,9 +315,15 @@ mod svcspec_tests { true, ) .unwrap(); - assert_eq!(1, svc.metadata.ownerReferences.len()); - assert_eq!("object1", &svc.metadata.ownerReferences[0].name); - assert_eq!("uid1", &svc.metadata.ownerReferences[0].uid); + assert_eq!(1, 
svc.metadata.owner_references.as_ref().unwrap().len()); + assert_eq!( + "object1", + &svc.metadata.owner_references.as_ref().unwrap()[0].name + ); + assert_eq!( + "uid1", + &svc.metadata.owner_references.as_ref().unwrap()[0].uid + ); update_ownership( &mut svc, @@ -353,26 +335,28 @@ mod svcspec_tests { true, ) .unwrap(); - assert_eq!(1, svc.metadata.ownerReferences.len()); - assert_eq!("object2", &svc.metadata.ownerReferences[0].name); - assert_eq!("uid2", &svc.metadata.ownerReferences[0].uid); + assert_eq!(1, svc.metadata.owner_references.as_ref().unwrap().len()); + assert_eq!( + "object2", + &svc.metadata.owner_references.as_ref().unwrap()[0].name + ); + assert_eq!( + "uid2", + &svc.metadata.owner_references.as_ref().unwrap()[0].uid + ); } #[test] fn test_update_ownership_append() { let _ = env_logger::builder().is_test(true).try_init(); - let svc = TestServiceObject { + let svc = Service { metadata: ObjectMeta::default(), - spec: ServiceSpec::default(), + spec: Some(ServiceSpec::default()), status: Some(ServiceStatus::default()), - types: TypeMeta { - apiVersion: None, - kind: None, - }, }; - assert_eq!(0, svc.metadata.ownerReferences.len()); + assert!(svc.metadata.owner_references.is_none()); let mut svc = svc; update_ownership( &mut svc, @@ -384,9 +368,15 @@ mod svcspec_tests { false, ) .unwrap(); - assert_eq!(1, svc.metadata.ownerReferences.len()); - assert_eq!("object1", &svc.metadata.ownerReferences[0].name); - assert_eq!("uid1", &svc.metadata.ownerReferences[0].uid); + assert_eq!(1, svc.metadata.owner_references.as_ref().unwrap().len()); + assert_eq!( + "object1", + &svc.metadata.owner_references.as_ref().unwrap()[0].name + ); + assert_eq!( + "uid1", + &svc.metadata.owner_references.as_ref().unwrap()[0].uid + ); update_ownership( &mut svc, @@ -398,11 +388,23 @@ mod svcspec_tests { false, ) .unwrap(); - assert_eq!(2, svc.metadata.ownerReferences.len()); - assert_eq!("object1", &svc.metadata.ownerReferences[0].name); - assert_eq!("uid1", 
&svc.metadata.ownerReferences[0].uid); - assert_eq!("object2", &svc.metadata.ownerReferences[1].name); - assert_eq!("uid2", &svc.metadata.ownerReferences[1].uid); + assert_eq!(2, svc.metadata.owner_references.as_ref().unwrap().len()); + assert_eq!( + "object1", + &svc.metadata.owner_references.as_ref().unwrap()[0].name + ); + assert_eq!( + "uid1", + &svc.metadata.owner_references.as_ref().unwrap()[0].uid + ); + assert_eq!( + "object2", + &svc.metadata.owner_references.as_ref().unwrap()[1].name + ); + assert_eq!( + "uid2", + &svc.metadata.owner_references.as_ref().unwrap()[1].uid + ); // Test that trying to add the same UID doesn't result in // duplicate @@ -416,11 +418,23 @@ mod svcspec_tests { false, ) .unwrap(); - assert_eq!(2, svc.metadata.ownerReferences.len()); - assert_eq!("object1", &svc.metadata.ownerReferences[0].name); - assert_eq!("uid1", &svc.metadata.ownerReferences[0].uid); - assert_eq!("object2", &svc.metadata.ownerReferences[1].name); - assert_eq!("uid2", &svc.metadata.ownerReferences[1].uid); + assert_eq!(2, svc.metadata.owner_references.as_ref().unwrap().len()); + assert_eq!( + "object1", + &svc.metadata.owner_references.as_ref().unwrap()[0].name + ); + assert_eq!( + "uid1", + &svc.metadata.owner_references.as_ref().unwrap()[0].uid + ); + assert_eq!( + "object2", + &svc.metadata.owner_references.as_ref().unwrap()[1].name + ); + assert_eq!( + "uid2", + &svc.metadata.owner_references.as_ref().unwrap()[1].uid + ); } #[test] @@ -463,18 +477,14 @@ mod svcspec_tests { ); // Validate the metadata name/namesapce - assert_eq!(&app_name, &svc.metadata.clone().unwrap().name.unwrap()); - assert_eq!( - &svc_namespace, - &svc.metadata.clone().unwrap().namespace.unwrap() - ); + assert_eq!(&app_name, &svc.metadata.clone().name.unwrap()); + assert_eq!(&svc_namespace, &svc.metadata.clone().namespace.unwrap()); // Validate the labels added assert_eq!( &&app_name, &svc.metadata .clone() - .unwrap() .labels .unwrap() .get(APP_LABEL_ID) @@ -484,7 +494,6 @@ mod 
svcspec_tests { &&API_NAMESPACE.to_string(), &svc.metadata .clone() - .unwrap() .labels .unwrap() .get(CONTROLLER_LABEL_ID) @@ -495,7 +504,6 @@ mod svcspec_tests { &&instance_name, &svc.metadata .clone() - .unwrap() .labels .unwrap() .get(AKRI_INSTANCE_LABEL_NAME) @@ -506,7 +514,6 @@ mod svcspec_tests { &&configuration_name, &svc.metadata .clone() - .unwrap() .labels .unwrap() .get(AKRI_CONFIGURATION_LABEL_NAME) @@ -519,8 +526,8 @@ mod svcspec_tests { object_name, svc.metadata .clone() - .unwrap() .owner_references + .as_ref() .unwrap() .get(0) .unwrap() @@ -530,8 +537,8 @@ mod svcspec_tests { object_uid, svc.metadata .clone() - .unwrap() .owner_references + .as_ref() .unwrap() .get(0) .unwrap() @@ -541,8 +548,8 @@ mod svcspec_tests { "Pod", &svc.metadata .clone() - .unwrap() .owner_references + .as_ref() .unwrap() .get(0) .unwrap() @@ -552,7 +559,6 @@ mod svcspec_tests { "core/v1", &svc.metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -562,7 +568,6 @@ mod svcspec_tests { assert!(svc .metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -572,7 +577,6 @@ mod svcspec_tests { assert!(svc .metadata .clone() - .unwrap() .owner_references .unwrap() .get(0) @@ -639,26 +643,25 @@ mod svcspec_tests { /// /// ```no_run /// use akri_shared::k8s::service; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// use k8s_openapi::api::core::v1::Service; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// service::create_service(&Service::default(), "svc_namespace", api_client).await.unwrap(); /// # } /// ``` pub async fn create_service( svc_to_create: &Service, namespace: &str, - kube_client: APIClient, -) -> Result<(), Box> { + kube_client: Client, +) -> Result<(), anyhow::Error> { trace!("create_service enter"); - let services = 
Api::v1Service(kube_client).within(&namespace); - let svc_as_u8 = serde_json::to_vec(&svc_to_create)?; + let services: Api = Api::namespaced(kube_client, namespace); info!("create_service svcs.create(...).await?:"); - match services.create(&PostParams::default(), svc_as_u8).await { + match services.create(&PostParams::default(), svc_to_create).await { Ok(created_svc) => { info!( "create_service services.create return: {:?}", @@ -680,7 +683,7 @@ pub async fn create_service( serde_json::to_string(&svc_to_create), e ); - Err(e.into()) + Err(anyhow::anyhow!(e)) } } } @@ -691,22 +694,22 @@ pub async fn create_service( /// /// ```no_run /// use akri_shared::k8s::service; -/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// service::remove_service("svc_to_remove", "svc_namespace", api_client).await.unwrap(); /// # } /// ``` pub async fn remove_service( svc_to_remove: &str, namespace: &str, - kube_client: APIClient, -) -> Result<(), Box> { + kube_client: Client, +) -> Result<(), anyhow::Error> { trace!("remove_service enter"); - let svcs = Api::v1Service(kube_client).within(&namespace); + let svcs: Api = Api::namespaced(kube_client, namespace); info!("remove_service svcs.create(...).await?:"); match svcs.delete(svc_to_remove, &DeleteParams::default()).await { Ok(deleted_svc) => match deleted_svc { @@ -731,7 +734,7 @@ pub async fn remove_service( "remove_service svcs.delete [{:?}] returned kube error: {:?}", &svc_to_remove, ae ); - Err(ae.into()) + Err(anyhow::anyhow!(ae)) } } Err(e) => { @@ -739,7 +742,7 @@ pub async fn remove_service( "remove_service svcs.delete [{:?}] error: {:?}", &svc_to_remove, e ); - Err(e.into()) + Err(anyhow::anyhow!(e)) } } } @@ -750,17 +753,17 @@ pub async fn remove_service( /// /// ```no_run /// use akri_shared::k8s::service; 
-/// use kube::client::APIClient; +/// use kube::client::Client; /// use kube::config; /// /// # #[tokio::main] /// # async fn main() { /// let selector = "environment=production,app=nginx"; -/// let api_client = APIClient::new(config::incluster_config().unwrap()); +/// let api_client = Client::try_default().await.unwrap(); /// for svc in service::find_services_with_selector(&selector, api_client).await.unwrap() { -/// let svc_name = &svc.metadata.name.clone(); +/// let svc_name = &svc.metadata.name.clone().unwrap(); /// let svc_namespace = &svc.metadata.namespace.as_ref().unwrap().clone(); -/// let loop_api_client = APIClient::new(config::incluster_config().unwrap()); +/// let loop_api_client = Client::try_default().await.unwrap(); /// let updated_svc = service::update_service( /// &svc, /// &svc_name, @@ -770,21 +773,23 @@ pub async fn remove_service( /// # } /// ``` pub async fn update_service( - svc_to_update: &Object, + svc_to_update: &Service, name: &str, namespace: &str, - kube_client: APIClient, -) -> Result<(), Box> { + kube_client: Client, +) -> Result<(), anyhow::Error> { trace!( "update_service enter name:{} namespace: {}", &name, &namespace ); - let svcs = Api::v1Service(kube_client).within(&namespace); - let svc_as_u8 = serde_json::to_vec(&svc_to_update)?; + let svcs: Api = Api::namespaced(kube_client, namespace); info!("remove_service svcs.patch(...).await?:"); - match svcs.patch(name, &PatchParams::default(), svc_as_u8).await { + match svcs + .patch(name, &PatchParams::default(), &Patch::Merge(&svc_to_update)) + .await + { Ok(_service_modified) => { log::trace!("update_service return"); Ok(()) @@ -794,11 +799,11 @@ pub async fn update_service( "update_service kube_client.request returned kube error: {:?}", ae ); - Err(ae.into()) + Err(anyhow::anyhow!(ae)) } Err(e) => { log::trace!("update_service kube_client.request error: {:?}", e); - Err(e.into()) + Err(anyhow::anyhow!(e)) } } } diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 
ed15298c3..b7ff0352f 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -2,7 +2,6 @@ extern crate k8s_openapi; #[macro_use] extern crate serde_derive; extern crate serde_yaml; -extern crate tokio_core; pub mod akri; pub mod k8s; diff --git a/shared/src/os/mod.rs b/shared/src/os/mod.rs index e378c3f09..efe654007 100644 --- a/shared/src/os/mod.rs +++ b/shared/src/os/mod.rs @@ -1,5 +1,4 @@ pub mod env_var; -pub mod signal; /// Provide file operations pub mod file { @@ -15,7 +14,7 @@ pub mod file { } /// This will read a file (as provided by a relative path) into a String pub fn read_file_to_string(relative_path: &str) -> String { - let file_path = get_canonical_path(&relative_path); + let file_path = get_canonical_path(relative_path); fs::read_to_string(&file_path) .unwrap_or_else(|_| panic!("unable to read file: {}", &file_path)) } diff --git a/shared/src/os/signal.rs b/shared/src/os/signal.rs deleted file mode 100644 index e064f1471..000000000 --- a/shared/src/os/signal.rs +++ /dev/null @@ -1,54 +0,0 @@ -// Taken from iotedge: -// https://raw.githubusercontent.com/Azure/iotedge/master/edgelet/iotedged/src/signal.rs -// iotedge added this credit: -// Adapted from the conduit proxy signal handling: -// https://github.com/runconduit/conduit/blob/master/proxy/src/signal.rs - -use futures_old::Future; - -pub type ShutdownSignal = Box + Send>; - -/// Get shutdown signal to handle SIGINT and SIGTERM -pub fn shutdown() -> ShutdownSignal { - imp::shutdown() -} - -#[cfg(unix)] -mod imp { - use super::ShutdownSignal; - use futures_old::{future, Future, Stream}; - use log::trace; - use std::fmt; - use tokio_signal::unix::{Signal, SIGINT, SIGTERM}; - - pub(super) fn shutdown() -> ShutdownSignal { - let signals = [SIGINT, SIGTERM].iter().map(|&sig| { - Signal::new(sig) - .flatten_stream() - .into_future() - .map(move |_| { - trace!("Received {}, starting shutdown", DisplaySignal(sig)); - }) - }); - let on_any_signal = future::select_all(signals) - .map(|_| ()) - 
.map_err(|_| unreachable!("Signal never returns an error")); - Box::new(on_any_signal) - } - - /// This is used to store and handle specific shutdown signals - #[derive(Clone, Copy)] - struct DisplaySignal(i32); - - /// Implement Display for SIGINT and SIGTERM - impl fmt::Display for DisplaySignal { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self.0 { - SIGINT => "SIGINT", - SIGTERM => "SIGTERM", - other => return write!(f, "signal {}", other), - }; - f.write_str(s) - } - } -} diff --git a/shared/src/uds/unix_stream.rs b/shared/src/uds/unix_stream.rs index 97c0beffe..43ec1fca2 100644 --- a/shared/src/uds/unix_stream.rs +++ b/shared/src/uds/unix_stream.rs @@ -1,25 +1,40 @@ /// Module to enable UDS with tonic grpc. /// This is unix only since the underlying UnixStream and UnixListener libraries are unix only. use std::{ - convert::TryFrom, pin::Pin, + sync::Arc, task::{Context, Poll}, - time::{Duration, SystemTime, UNIX_EPOCH}, }; -use tokio::io::{AsyncRead, AsyncWrite}; + +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tonic::transport::server::Connected; #[derive(Debug)] pub struct UnixStream(pub tokio::net::UnixStream); -impl Connected for UnixStream {} +impl Connected for UnixStream { + type ConnectInfo = UdsConnectInfo; + + fn connect_info(&self) -> Self::ConnectInfo { + UdsConnectInfo { + peer_addr: self.0.peer_addr().ok().map(Arc::new), + peer_cred: self.0.peer_cred().ok(), + } + } +} + +#[derive(Clone, Debug)] +pub struct UdsConnectInfo { + pub peer_addr: Option>, + pub peer_cred: Option, +} impl AsyncRead for UnixStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } } @@ -43,6 +58,9 @@ impl AsyncWrite for UnixStream { } pub async fn try_connect(socket_path: &str) -> Result<(), anyhow::Error> { + use std::convert::TryFrom; + use std::time::{Duration, SystemTime, UNIX_EPOCH}; + // Test that 
server is running, trying for at most 10 seconds // Similar to grpc.timeout, which is yet to be implemented for tonic // See issue: https://github.com/hyperium/tonic/issues/75 @@ -71,7 +89,7 @@ pub async fn try_connect(socket_path: &str) -> Result<(), anyhow::Error> { { connected = true } else { - tokio::time::delay_for(Duration::from_secs(1)).await + tokio::time::sleep(Duration::from_secs(1)).await } } if connected { diff --git a/test/json/local-instance.json b/test/json/local-instance.json index a1d5be708..34105187b 100644 --- a/test/json/local-instance.json +++ b/test/json/local-instance.json @@ -1,4 +1,6 @@ { + "apiVersion": "akri.sh/v0", + "kind": "Instance", "metadata": { "name": "config-a-b494b6", "namespace": "config-a-namespace", diff --git a/test/json/shared-instance-list-slots.json b/test/json/shared-instance-list-slots.json index aca9e9d4f..f3c571458 100644 --- a/test/json/shared-instance-list-slots.json +++ b/test/json/shared-instance-list-slots.json @@ -2,6 +2,8 @@ "apiVersion": "v1", "items": [ { + "apiVersion": "akri.sh/v0", + "kind": "Instance", "metadata": { "name": "config-a-359973", "namespace": "config-a-namespace", diff --git a/test/json/shared-instance-list.json b/test/json/shared-instance-list.json index 33307e123..1b9914dd1 100644 --- a/test/json/shared-instance-list.json +++ b/test/json/shared-instance-list.json @@ -2,6 +2,8 @@ "apiVersion": "v1", "items": [ { + "apiVersion": "akri.sh/v0", + "kind": "Instance", "metadata": { "name": "config-a-359973", "namespace": "config-a-namespace", diff --git a/test/json/shared-instance-update.json b/test/json/shared-instance-update.json index 8def737ef..81d4b5168 100644 --- a/test/json/shared-instance-update.json +++ b/test/json/shared-instance-update.json @@ -1,4 +1,6 @@ { + "apiVersion": "akri.sh/v0", + "kind": "Instance", "metadata": { "name": "config-a-359973", "namespace": "config-a-namespace", diff --git a/test/json/shared-instance.json b/test/json/shared-instance.json index d6bcb4c36..120d0d169 
100644 --- a/test/json/shared-instance.json +++ b/test/json/shared-instance.json @@ -1,4 +1,6 @@ { + "apiVersion": "akri.sh/v0", + "kind": "Instance", "metadata": { "name": "config-a-359973", "namespace": "config-a-namespace", diff --git a/version.txt b/version.txt index 592e815ea..e196726d2 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.6.12 +0.6.13 diff --git a/webhooks/validating/configuration/Cargo.toml b/webhooks/validating/configuration/Cargo.toml index 614cfe1b1..82a34ac3e 100644 --- a/webhooks/validating/configuration/Cargo.toml +++ b/webhooks/validating/configuration/Cargo.toml @@ -1,17 +1,18 @@ [package] name = "webhook-configuration" -version = "0.6.12" +version = "0.6.13" authors = ["DazWilkin "] edition = "2018" [dependencies] -actix = "0.11.1" +actix = "0.12.0" actix-web = { version = "3.3.2", features = ["openssl"] } actix-rt = "2.2.0" akri-shared = { path = "../../../shared" } -clap = "3.0.0-beta.2" -k8s-openapi = { version = "0.6.0", features = ["v1_16"] } -kube = { version = "0.23.0", features = ["openapi"] } +clap = "2.33.3" +kube = { version = "0.59.0", features = ["derive"] } +kube-runtime = "0.59.0" +k8s-openapi = { version = "0.13.0", default-features = false, features = ["schemars", "v1_16"] } openapi = { git = "https://github.com/DazWilkin/openapi-admission-v1", tag = "v1.1.0" } openssl = "0.10" rustls = "0.19.0" diff --git a/webhooks/validating/configuration/src/main.rs b/webhooks/validating/configuration/src/main.rs index 3ca1b3304..51b7c7c02 100644 --- a/webhooks/validating/configuration/src/main.rs +++ b/webhooks/validating/configuration/src/main.rs @@ -1,5 +1,5 @@ use actix_web::{post, web, App, HttpResponse, HttpServer, Responder}; -use akri_shared::akri::configuration::KubeAkriConfig; +use akri_shared::akri::configuration::Configuration; use clap::Arg; use k8s_openapi::apimachinery::pkg::runtime::RawExtension; use openapi::models::{ @@ -27,7 +27,7 @@ fn check( match v { serde_json::Value::Object(o) => { for (key, value) in 
o { - if let Err(e) = check(&value, &deserialized[key]) { + if let Err(e) = check(value, &deserialized[key]) { return Err(None.ok_or(format!( "input key ({:?}) not equal to parsed: ({:?})", key, e @@ -103,7 +103,7 @@ fn filter_configuration(mut v: Value) -> Value { metadata.remove("managedFields"); let generation = metadata.get_mut("generation").unwrap(); - *generation = json!(generation.as_f64().unwrap()); + *generation = json!(generation.as_u64().unwrap()); v } @@ -115,7 +115,7 @@ fn validate_configuration(rqst: &AdmissionRequest) -> AdmissionResponse { let x: RawExtension = serde_json::from_value(raw.clone()) .expect("Could not parse as Kubernetes RawExtension"); let y = serde_json::to_string(&x).unwrap(); - let config: KubeAkriConfig = + let config: Configuration = serde_json::from_str(y.as_str()).expect("Could not parse as Akri Configuration"); let reserialized = serde_json::to_string(&config).unwrap(); let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); @@ -179,7 +179,7 @@ async fn validate(rqst: web::Json) -> impl Responder { match &rqst.request { Some(rqst) => { println!("Handler received: AdmissionRequest"); - let resp = validate_configuration(&rqst); + let resp = validate_configuration(rqst); let resp: AdmissionReview = AdmissionReview { api_version: Some("admission.k8s.io/v1".to_owned()), kind: Some("AdmissionReview".to_owned()), @@ -187,11 +187,11 @@ async fn validate(rqst: web::Json) -> impl Responder { response: Some(resp), }; let body = serde_json::to_string(&resp).expect("Valid AdmissionReview"); - return HttpResponse::Ok().body(body); + HttpResponse::Ok().body(body) } None => { println!("Handler received: Nothing"); - return HttpResponse::BadRequest().body(""); + HttpResponse::BadRequest().body("") } } } @@ -200,25 +200,25 @@ async fn validate(rqst: web::Json) -> impl Responder { async fn main() -> std::io::Result<()> { let matches = clap::App::new("Akri Webhook") .arg( - Arg::new("crt_file") + 
Arg::with_name("crt_file") .long("tls-crt-file") .takes_value(true) .required(true) - .about("TLS certificate file"), + .help("TLS certificate file"), ) .arg( - Arg::new("key_file") + Arg::with_name("key_file") .long("tls-key-file") .takes_value(true) .required(true) - .about("TLS private key file"), + .help("TLS private key file"), ) .arg( - Arg::new("port") + Arg::with_name("port") .long("port") .takes_value(true) .required(true) - .about("port"), + .help("port"), ) .get_matches(); @@ -680,10 +680,10 @@ mod tests { } // Akri Configuration schema tests - use kube::api::{Object, Void}; + use kube::api::{NotUsed, Object}; #[test] fn test_creationtimestamp_is_filtered() { - let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); let reserialized = serde_json::to_string(&t).expect("bytes"); let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); let v = filter_configuration(deserialized); @@ -692,7 +692,7 @@ mod tests { #[test] fn test_deletiontimestamp_is_filtered() { - let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); let reserialized = serde_json::to_string(&t).expect("bytes"); let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); let v = filter_configuration(deserialized); @@ -701,7 +701,7 @@ mod tests { #[test] fn test_managedfields_is_filtered() { - let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); let reserialized = serde_json::to_string(&t).expect("bytes"); let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); let v = filter_configuration(deserialized); @@ -709,12 +709,12 @@ mod tests { } #[test] - fn test_generation_becomes_f64() { - let t: Object = 
serde_json::from_str(METADATA).expect("Valid Metadata"); + fn test_generation_becomes_u64() { + let t: Object = serde_json::from_str(METADATA).expect("Valid Metadata"); let reserialized = serde_json::to_string(&t).expect("bytes"); let deserialized: Value = serde_json::from_str(&reserialized).expect("untyped JSON"); let v = filter_configuration(deserialized); - assert!(v["metadata"].get("generation").unwrap().is_f64()); + assert!(v["metadata"].get("generation").unwrap().is_u64()); } #[test]