diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 673144cb32827..48a96c5792c64 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -2764,6 +2764,7 @@ macro_rules! uint_impl {
         /// ```
         #[doc = concat!("assert_eq!(2", stringify!($SelfT), ".next_power_of_two(), 2);")]
         #[doc = concat!("assert_eq!(3", stringify!($SelfT), ".next_power_of_two(), 4);")]
+        #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".next_power_of_two(), 1);")]
         /// ```
         #[stable(feature = "rust1", since = "1.0.0")]
         #[rustc_const_stable(feature = "const_int_pow", since = "1.50.0")]
diff --git a/library/std/src/sys/pal/unix/net.rs b/library/std/src/sys/pal/unix/net.rs
index 7237989c9059b..b8dc1538a6378 100644
--- a/library/std/src/sys/pal/unix/net.rs
+++ b/library/std/src/sys/pal/unix/net.rs
@@ -86,7 +86,14 @@ impl Socket {
                     // flag to atomically create the socket and set it as
                     // CLOEXEC. On Linux this was added in 2.6.27.
                     let fd = cvt(libc::socket(fam, ty | libc::SOCK_CLOEXEC, 0))?;
-                    Ok(Socket(FileDesc::from_raw_fd(fd)))
+                    let socket = Socket(FileDesc::from_raw_fd(fd));
+
+                    // DragonFlyBSD, FreeBSD and NetBSD use `SO_NOSIGPIPE` as a `setsockopt`
+                    // flag to disable `SIGPIPE` emission on socket.
+                    #[cfg(any(target_os = "freebsd", target_os = "netbsd", target_os = "dragonfly"))]
+                    setsockopt(&socket, libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1)?;
+
+                    Ok(socket)
                 } else {
                     let fd = cvt(libc::socket(fam, ty, 0))?;
                     let fd = FileDesc::from_raw_fd(fd);
diff --git a/src/ci/docker/scripts/fuchsia-test-runner.py b/src/ci/docker/scripts/fuchsia-test-runner.py
index 8ac00a8863f6f..4f504341d5266 100755
--- a/src/ci/docker/scripts/fuchsia-test-runner.py
+++ b/src/ci/docker/scripts/fuchsia-test-runner.py
@@ -9,10 +9,8 @@
 
 import argparse
 from dataclasses import dataclass
-import fcntl
 import glob
 import hashlib
-import io
 import json
 import os
 import platform
@@ -143,6 +141,14 @@ def subprocess_output(self):
             return sys.stdout
         return subprocess.DEVNULL
 
+    def check_call(self, args, **kwargs):
+        self.log_info(f"Running: {' '.join(args)}")
+        return subprocess.check_call(args, **kwargs)
+
+    def check_output(self, args, **kwargs):
+        self.log_info(f"Running: {' '.join(args)}")
+        return subprocess.check_output(args, **kwargs)
+
     def ffx_daemon_log_path(self):
         return os.path.join(self.tmp_dir(), "ffx_daemon_log")
 
@@ -178,7 +184,7 @@ def start_ffx_isolation(self):
         )
 
         # Disable analytics
-        subprocess.check_call(
+        self.check_call(
             [
                 ffx_path,
                 "config",
@@ -197,7 +203,7 @@ def start_ffx_isolation(self):
             "test.experimental_structured_output": "true",
         }
         for key, value in configs.items():
-            subprocess.check_call(
+            self.check_call(
                 [
                     ffx_path,
                     "config",
@@ -222,7 +228,7 @@ def ffx_cmd_env(self):
         }
 
     def stop_ffx_isolation(self):
-        subprocess.check_call(
+        self.check_call(
             [
                 self.tool_path("ffx"),
                 "daemon",
@@ -265,7 +271,7 @@ def start(self):
         self.start_ffx_isolation()
 
         # Stop any running emulators (there shouldn't be any)
-        subprocess.check_call(
+        self.check_call(
             [
                 ffx_path,
                 "emu",
@@ -282,11 +288,11 @@ def start(self):
         product_name = "minimal." + self.triple_to_arch(self.target)
         fuchsia_version = "20.20240412.3.1"
 
-        # FIXME: We should be able to replace this with the machine parsable
-        # `ffx --machine json product lookup ...` once F15 is released.
-        out = subprocess.check_output(
+        out = self.check_output(
             [
                 ffx_path,
+                "--machine",
+                "json",
                 "product",
                 "lookup",
                 product_name,
@@ -300,16 +306,15 @@ def start(self):
 
         self.log_debug(out)
 
-        for line in io.BytesIO(out):
-            if line.startswith(b"gs://"):
-                transfer_manifest_url = line.rstrip()
-                break
-        else:
-            raise Exception("Unable to parse transfer manifest")
+        try:
+            transfer_manifest_url = json.loads(out)["transfer_manifest_url"]
+        except Exception as e:
+            print(e)
+            raise Exception("Unable to parse transfer manifest") from e
 
         # Download the product bundle.
         product_bundle_dir = os.path.join(self.tmp_dir(), 'product-bundle')
-        subprocess.check_call(
+        self.check_call(
             [
                 ffx_path,
                 "product",
@@ -325,7 +330,7 @@ def start(self):
 
         # Start emulator
         # FIXME: condition --accel hyper on target arch matching host arch
-        subprocess.check_call(
+        self.check_call(
             [
                 ffx_path,
                 "emu",
@@ -346,42 +351,52 @@ def start(self):
 
         # Create new package repo
         self.log_info("Creating package repo...")
-        subprocess.check_call(
+        self.check_call(
             [
-                self.tool_path("pm"),
-                "newrepo",
-                "-repo",
+                ffx_path,
+                "repository",
+                "create",
                 self.repo_dir(),
             ],
+            env=ffx_env,
             stdout=self.subprocess_output(),
             stderr=self.subprocess_output(),
         )
 
-        # Add repo
-        subprocess.check_call(
+        self.check_call(
             [
                 ffx_path,
                 "repository",
                 "add-from-pm",
-                self.repo_dir(),
                 "--repository",
                 self.TEST_REPO_NAME,
+                self.repo_dir(),
             ],
             env=ffx_env,
             stdout=self.subprocess_output(),
             stderr=self.subprocess_output(),
         )
 
+        # Write to file
+        self.write_to_file()
+
         # Start repository server
-        subprocess.check_call(
-            [ffx_path, "repository", "server", "start", "--address", "[::]:0"],
+        self.check_call(
+            [
+                ffx_path,
+                "repository",
+                "server",
+                "start",
+                "--address",
+                "[::]:0",
+            ],
             env=ffx_env,
             stdout=self.subprocess_output(),
             stderr=self.subprocess_output(),
         )
 
         # Register with newly-started emulator
-        subprocess.check_call(
+        self.check_call(
             [
                 ffx_path,
                 "target",
@@ -395,12 +410,6 @@ def start(self):
             stderr=self.subprocess_output(),
         )
 
-        # Create lockfiles
-        open(self.pm_lockfile_path(), "a").close()
-
-        # Write to file
-        self.write_to_file()
-
         self.log_info("Success! Your environment is ready to run tests.")
 
         # FIXME: shardify this
@@ -445,7 +454,6 @@ def start(self):
     meta/{package_name}.cm={package_dir}/meta/{package_name}.cm
     bin/{exe_name}={bin_path}
     lib/{libstd_name}={libstd_path}
-    lib/{libtest_name}={libtest_path}
    lib/ld.so.1={sdk_dir}/arch/{target_arch}/sysroot/dist/lib/ld.so.1
     lib/libfdio.so={sdk_dir}/arch/{target_arch}/dist/libfdio.so
     """
@@ -482,9 +490,6 @@ def run(self, args):
         if not libstd_paths:
             raise Exception(f"Failed to locate libstd (in {self.rustlibs_dir()})")
 
-        if not libtest_paths:
-            raise Exception(f"Failed to locate libtest (in {self.rustlibs_dir()})")
-
         # Build a unique, deterministic name for the test using the name of the
         # binary and the last 6 hex digits of the hash of the full path
         def path_checksum(path):
@@ -500,6 +505,7 @@ def path_checksum(path):
         cml_path = os.path.join(package_dir, "meta", f"{package_name}.cml")
         cm_path = os.path.join(package_dir, "meta", f"{package_name}.cm")
         manifest_path = os.path.join(package_dir, f"{package_name}.manifest")
+        manifest_json_path = os.path.join(package_dir, "package_manifest.json")
        far_path = os.path.join(package_dir, f"{package_name}-0.far")
 
         shared_libs = args.shared_libs[: args.n]
@@ -523,22 +529,6 @@ def log(msg):
 
         log(f"Bin path: {bin_path}")
 
-        log("Setting up package...")
-
-        # Set up package
-        subprocess.check_call(
-            [
-                self.tool_path("pm"),
-                "-o",
-                package_dir,
-                "-n",
-                package_name,
-                "init",
-            ],
-            stdout=log_file,
-            stderr=log_file,
-        )
-
         log("Writing CML...")
 
         # Write and compile CML
@@ -563,7 +553,7 @@ def log(msg):
 
         log("Compiling CML...")
 
-        subprocess.check_call(
+        self.check_call(
             [
                 self.tool_path("cmc"),
                 "compile",
@@ -590,38 +580,61 @@ def log(msg):
                     target=self.target,
                     sdk_dir=self.sdk_dir,
                     libstd_name=os.path.basename(libstd_paths[0]),
-                    libtest_name=os.path.basename(libtest_paths[0]),
                     libstd_path=libstd_paths[0],
-                    libtest_path=libtest_paths[0],
                     target_arch=self.triple_to_arch(self.target),
                 )
             )
+            # `libtest` was historically a shared library, but now seems to be (sometimes?)
+            # statically linked. If we find it as a shared library, include it in the manifest.
+            if libtest_paths:
+                manifest.write(
+                    f"lib/{os.path.basename(libtest_paths[0])}={libtest_paths[0]}\n"
+                )
             for shared_lib in shared_libs:
                 manifest.write(f"lib/{os.path.basename(shared_lib)}={shared_lib}\n")
 
+        log("Determining API level...")
+        out = self.check_output(
+            [
+                self.tool_path("ffx"),
+                "--machine",
+                "json",
+                "version",
+            ],
+            env=self.ffx_cmd_env(),
+            stderr=log_file,
+        )
+        api_level = json.loads(out)["tool_version"]["api_level"]
+
         log("Compiling and archiving manifest...")
 
-        subprocess.check_call(
+        self.check_call(
             [
-                self.tool_path("pm"),
+                self.tool_path("ffx"),
+                "package",
+                "build",
+                manifest_path,
                 "-o",
                 package_dir,
-                "-m",
-                manifest_path,
-                "build",
+                "--api-level",
+                str(api_level),
             ],
+            env=self.ffx_cmd_env(),
             stdout=log_file,
             stderr=log_file,
         )
-        subprocess.check_call(
+
+        self.check_call(
             [
-                self.tool_path("pm"),
-                "-o",
-                package_dir,
-                "-m",
-                manifest_path,
+                self.tool_path("ffx"),
+                "package",
                 "archive",
+                "create",
+                "-o",
+                far_path,
+                manifest_json_path,
             ],
+            env=self.ffx_cmd_env(),
             stdout=log_file,
             stderr=log_file,
         )
@@ -629,25 +642,18 @@ def log(msg):
         log("Publishing package to repo...")
 
         # Publish package to repo
-        with open(self.pm_lockfile_path(), "w") as pm_lockfile:
-            fcntl.lockf(pm_lockfile.fileno(), fcntl.LOCK_EX)
-            subprocess.check_call(
-                [
-                    self.tool_path("pm"),
-                    "publish",
-                    "-a",
-                    "-repo",
-                    self.repo_dir(),
-                    "-f",
-                    far_path,
-                ],
-                stdout=log_file,
-                stderr=log_file,
-            )
-            # This lock should be released automatically when the pm
-            # lockfile is closed, but we'll be polite and unlock it now
-            # since the spec leaves some wiggle room.
-            fcntl.lockf(pm_lockfile.fileno(), fcntl.LOCK_UN)
+        self.check_call(
+            [
+                self.tool_path("ffx"),
+                "repository",
+                "publish",
+                "--package",
+                os.path.join(package_dir, "package_manifest.json"),
+                self.repo_dir(),
+            ],
+            stdout=log_file,
+            stderr=log_file,
+        )
 
         log("Running ffx test...")
 
@@ -765,7 +771,7 @@ def stop(self):
 
         # Shut down the emulator
         self.log_info("Stopping emulator...")
-        subprocess.check_call(
+        self.check_call(
             [
                 self.tool_path("ffx"),
                 "emu",
diff --git a/src/doc/rustc/src/platform-support/fuchsia.md b/src/doc/rustc/src/platform-support/fuchsia.md
index 34ab3cdaf25e5..9c2e05b57f5e8 100644
--- a/src/doc/rustc/src/platform-support/fuchsia.md
+++ b/src/doc/rustc/src/platform-support/fuchsia.md
@@ -683,25 +683,37 @@ cd ${RUST_SRC_PATH}
 
 To run the Rust test suite on an emulated Fuchsia device, you'll also need to
 download a copy of the Fuchsia SDK. The current minimum supported SDK version is
-[10.20221207.2.89][minimum_supported_sdk_version].
+[20.20240412.3.1][minimum_supported_sdk_version].
 
-[minimum_supported_sdk_version]: https://chrome-infra-packages.appspot.com/p/fuchsia/sdk/core/linux-amd64/+/version:10.20221207.2.89
+[minimum_supported_sdk_version]: https://chrome-infra-packages.appspot.com/p/fuchsia/sdk/core/linux-amd64/+/version:20.20240412.3.1
 
 Fuchsia's test runner interacts with the Fuchsia emulator and is located at
-`src/ci/docker/scripts/fuchsia-test-runner.py`. We can use it to start our
-test environment with:
+`src/ci/docker/scripts/fuchsia-test-runner.py`. First, add the following
+variables to your existing `config-env.sh`:
+
+```sh
+# TEST_TOOLCHAIN_TMP_DIR can point anywhere, but it:
+# - must be less than 108 characters, otherwise qemu can't handle the path
+# - must be consistent across calls to this file (don't use `mktemp -d` here)
+export TEST_TOOLCHAIN_TMP_DIR="/tmp/rust-tmp"
+
+# Keep existing contents of `config-env.sh` from earlier, including SDK_PATH
+```
+
+We can then use the script to start our test environment with:
 
 ```sh
 ( \
+    source config-env.sh && \
     src/ci/docker/scripts/fuchsia-test-runner.py start \
     --rust-build ${RUST_SRC_PATH}/build \
     --sdk ${SDK_PATH} \
     --target {x86_64-unknown-fuchsia|aarch64-unknown-fuchsia} \
+    --verbose \
 )
 ```
 
-Where `${RUST_SRC_PATH}/build` is the `build-dir` set in `config.toml` and
-`${SDK_PATH}` is the path to the downloaded and unzipped SDK.
+Where `${RUST_SRC_PATH}/build` is the `build-dir` set in `config.toml`.
 
 Once our environment is started, we can run our tests using `x.py` as usual. The
 test runner script will run the compiled tests on an emulated Fuchsia device. To