30 changes: 30 additions & 0 deletions doc/manual/rl-next/s3-public-parameter.md
@@ -0,0 +1,30 @@
---
synopsis: "S3 URLs now support skipping authentication for public buckets"
prs: [14463]
issues: [4857]
---

S3 URLs now support a `public=true` query parameter that instructs Nix to skip
all credential lookup attempts when accessing S3 buckets. This eliminates
timeout delays when working with publicly accessible S3 buckets and improves
reliability in environments where AWS credentials may be unavailable or
misconfigured.

**Example usage:**

```bash
# S3 binary cache store
nix copy --from 's3://nix-cache?public=true&region=us-east-1' /nix/store/...
```

```nix
# fetchurl with public S3 URL
builtins.fetchurl {
url = "s3://public-bucket/file.tar.gz?public=true&region=us-east-1";
sha256 = "...";
}
```

**Note:** The bucket must have appropriate public access policies configured on
the S3 side. Nix does not attempt to verify permissions; requests will fail
with HTTP 403 if the bucket is not publicly accessible.
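
For illustration only (a sketch, not part of this change): a bucket can be made publicly readable with a policy along the following lines. The bucket name `example-nix-cache`, the AWS CLI invocation, and the exact set of granted actions are assumptions to adapt to your own deployment.

```bash
# Hypothetical example: allow anonymous reads on a bucket named "example-nix-cache".
# Requires the AWS CLI; replace the bucket name with your own.
aws s3api put-bucket-policy --bucket example-nix-cache --policy '{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowPublicRead",
      "Effect": "Allow",
      "Principal": "*",
      "Action": ["s3:GetObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::example-nix-cache",
        "arn:aws:s3:::example-nix-cache/*"
      ]
    }
  ]
}'
```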
9 changes: 9 additions & 0 deletions src/libstore-tests/s3-url.cc
@@ -104,6 +104,15 @@ INSTANTIATE_TEST_SUITE_P(
},
},
"with_absolute_endpoint_uri",
},
ParsedS3URLTestCase{
"s3://public-bucket/data.tar.gz?public=true",
{
.bucket = "public-bucket",
.key = {"data.tar.gz"},
.public_ = true,
},
"public_bucket_true",
}),
[](const ::testing::TestParamInfo<ParsedS3URLTestCase> & info) { return info.param.description; });

6 changes: 6 additions & 0 deletions src/libstore/filetransfer.cc
@@ -885,6 +885,12 @@ void FileTransferRequest::setupForS3()
// Update the request URI to use HTTPS (works without AWS SDK)
uri = parsedS3.toHttpsUrl();

// Skip authentication for public buckets
if (parsedS3.public_) {
debug("S3 request without authentication (marked as public bucket)");
return;
}

#if NIX_WITH_AWS_AUTH
// Auth-specific code only compiled when AWS support is available
awsSigV4Provider = "aws:amz:" + parsedS3.region.value_or("us-east-1") + ":s3";
21 changes: 20 additions & 1 deletion src/libstore/include/nix/store/s3-binary-cache-store.hh
@@ -93,11 +93,30 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig
Default is 100 MiB. Only takes effect when multipart-upload is enabled.
)"};

const Setting<bool> public_{
this,
false,
"public",
R"(
Whether to treat this S3 bucket as publicly accessible without authentication.
When set to `true`, Nix will skip all credential lookup attempts, including
checking EC2 instance metadata endpoints. This significantly improves performance
when accessing public S3 buckets from non-AWS infrastructure.

> **Note**
>
> This setting should only be used with genuinely public buckets. Using it
> with private buckets will result in access denied errors.
)"};

/**
* Set of settings that are part of the S3 URI itself.
* These are needed for region specification and other S3-specific settings.
*
* @note The "public" parameter is a Nix-specific flag that controls authentication behavior,
* telling Nix to skip credential lookup for public buckets to avoid timeouts.
*/
const std::set<const AbstractSetting *> s3UriSettings = {&profile, &region, &scheme, &endpoint};
const std::set<const AbstractSetting *> s3UriSettings = {&profile, &region, &scheme, &endpoint, &public_};

static const std::string name()
{
1 change: 1 addition & 0 deletions src/libstore/include/nix/store/s3-url.hh
@@ -32,6 +32,7 @@ struct ParsedS3URL
* or an authority (so an IP address or a registered name).
*/
std::variant<std::monostate, ParsedURL, ParsedURL::Authority> endpoint;
bool public_ = false;

std::optional<std::string> getEncodedEndpoint() const
{
14 changes: 11 additions & 3 deletions src/libstore/s3-binary-cache-store.md
@@ -10,9 +10,17 @@ For S3 compatible binary caches, consult that cache's documentation.

### Anonymous reads to your S3-compatible binary cache

> If your binary cache is publicly accessible and does not require authentication,
> it is simplest to use the [HTTP Binary Cache Store] rather than S3 Binary Cache Store with
> <https://example-nix-cache.s3.amazonaws.com> instead of <s3://example-nix-cache>.
If your binary cache is publicly accessible and does not require authentication,
you have two options:

1. Use the [HTTP Binary Cache Store] with <https://example-nix-cache.s3.amazonaws.com> instead of <s3://example-nix-cache>.

2. Use the S3 Binary Cache Store with the `public=true` parameter:
```
s3://example-nix-cache?public=true
```

The `public` parameter tells Nix to skip credential lookup attempts.
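
As a quick check (a sketch; `example-nix-cache` is a placeholder bucket name), such a store should answer queries without any AWS credentials configured:

```
$ nix store info --store 's3://example-nix-cache?public=true'
```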

Your bucket will need a
[bucket policy](https://docs.aws.amazon.com/AmazonS3/v1/userguide/bucket-policies.html)
7 changes: 7 additions & 0 deletions src/libstore/s3-url.cc
@@ -36,6 +36,12 @@
return it->second;
};

auto getBooleanParam = [&](std::string_view key) -> bool {
return getOptionalParam(key)
.transform([](std::string_view val) { return val == "true" || val == "1"; })
.value_or(false);
};

auto endpoint = getOptionalParam("endpoint");
if (parsed.path.size() <= 1 || !parsed.path.front().empty())
throw BadURL("URI has a missing or invalid key");
@@ -61,6 +67,7 @@

return ParsedURL::Authority::parse(*endpoint);
}(),
.public_ = getBooleanParam("public"),
};
} catch (BadURL & e) {
e.addTrace({}, "while parsing S3 URI: '%s'", parsed.to_string());
9 changes: 8 additions & 1 deletion src/libstore/unix/build/derivation-builder.cc
@@ -954,9 +954,16 @@ std::optional<AwsCredentials> DerivationBuilderImpl::preResolveAwsCredentials()
try {
auto parsedUrl = parseURL(url->second);
if (parsedUrl.scheme == "s3") {
debug("Pre-resolving AWS credentials for S3 URL in builtin:fetchurl");
auto s3Url = ParsedS3URL::parse(parsedUrl);

// Skip credential pre-resolution for public buckets
if (s3Url.public_) {
debug("Skipping credential pre-resolution for public S3 bucket");
return std::nullopt;
}

debug("Pre-resolving AWS credentials for S3 URL in builtin:fetchurl");

// Use the preResolveAwsCredentials from aws-creds
auto credentials = getAwsCredentialsProvider()->getCredentials(s3Url);
debug("Successfully pre-resolved AWS credentials in parent process");
129 changes: 127 additions & 2 deletions tests/nixos/s3-binary-cache-store.nix
@@ -364,7 +364,7 @@
"""Test store operations on public bucket without credentials"""
print("\n=== Testing Public Bucket Operations ===")

store_url = make_s3_url(bucket)
store_url = make_s3_url(bucket, public='true')

# Verify store info works without credentials
client.succeed(f"nix store info --store '{store_url}' >&2")
@@ -383,15 +383,139 @@
verify_packages_in_store(client, [PKGS['A'], PKGS['B']], should_exist=False)

# Test copy from public bucket without credentials
client.succeed(
output = client.succeed(
f"nix copy --debug --no-check-sigs "
f"--from '{store_url}' {PKGS['A']} {PKGS['B']} 2>&1"
)

# Verify the public flag is working (should see the debug message)
if "S3 request without authentication (marked as public bucket)" not in output:
print("Debug output:")
print(output)
raise Exception("Expected to see public bucket debug message")

# Verify no credential provider was created
if "creating new AWS credential provider" in output:
print("Debug output:")
print(output)
raise Exception("Should NOT create credential provider for public bucket")

# Verify packages were copied successfully
verify_packages_in_store(client, [PKGS['A'], PKGS['B']])

print(" ✓ nix copy from public bucket works without credentials")
print(" ✓ No credential lookup attempted (public=true flag working)")

@setup_s3(public=True)
def test_fetchurl_public_bucket(bucket):
"""Test that fetchurl of public S3 URL does not trigger credential attempts"""
print("\n=== Testing fetchurl with Public S3 URL ===")

client.wait_for_unit("network-addresses-eth1.service")

# Upload a test file to the public bucket
test_content = "Public S3 test file content for fetchurl\n"
server.succeed(f"echo -n '{test_content}' > /tmp/public-test-file.txt")

# Calculate expected hash on server where file exists
file_hash = server.succeed(
"nix hash file --type sha256 --base32 /tmp/public-test-file.txt"
).strip()

server.succeed(f"mc cp /tmp/public-test-file.txt minio/{bucket}/public-test.txt")

print(" ✓ Uploaded test file to public bucket")

# Test 1: builtins.fetchurl (immediate fetch in evaluator)
# ======================================================
s3_url = make_s3_url(bucket, path="/public-test.txt", public='true')

output = client.succeed(
f"nix eval --debug --impure --expr "
f"'builtins.fetchurl {{ name = \"public-s3-test\"; url = \"{s3_url}\"; }}' 2>&1"
)

# Verify the public flag is working (should see the debug message)
if "S3 request without authentication (marked as public bucket)" not in output:
print("Debug output:")
print(output)
raise Exception("Expected to see public bucket debug message for fetchurl")

# Verify no credential provider was created
if "creating new AWS credential provider" in output:
print("Debug output:")
print(output)
raise Exception("fetchurl should NOT create credential provider for public S3 URL")

# Verify no credential pre-resolution happened (that's for private buckets only)
if "Pre-resolving AWS credentials" in output:
print("Debug output:")
print(output)
raise Exception("Should not attempt credential pre-resolution for public buckets")

print(" ✓ builtins.fetchurl works with public S3 URL")
print(" ✓ No credential lookup attempted (public=true flag working)")
print(" ✓ No credential pre-resolution attempted")

# Test 2: import <nix/fetchurl.nix> (fixed-output derivation with fork)
# =====================================================================
print("\n Testing import <nix/fetchurl.nix> with public S3 URL...")

# Build derivation with unique test ID (using hash calculated earlier)
test_id = random.randint(0, 10000)
test_url = make_s3_url(bucket, path="/public-test.txt", public='true', test_id=test_id)

fetchurl_expr = """
import <nix/fetchurl.nix> {{
name = "public-s3-fork-test-{id}";
url = "{url}";
sha256 = "{hash}";
}}
""".format(id=test_id, url=test_url, hash=file_hash)

build_output = client.succeed(
f"nix build --debug --impure --no-link --expr '{fetchurl_expr}' 2>&1"
)

# Verify fork behavior - should create fresh FileTransfer
if "builtin:fetchurl creating fresh FileTransfer instance" not in build_output:
print("Debug output:")
print(build_output)
raise Exception("Expected to find FileTransfer creation in forked process")

print(" ✓ Forked process creates fresh FileTransfer")

# Verify public bucket handling in forked process
if "S3 request without authentication (marked as public bucket)" not in build_output:
print("Debug output:")
print(build_output)
raise Exception("Expected to see public bucket debug message in forked process")

print(" ✓ Public bucket flag respected in forked process")

# Verify no credential provider was created (neither in parent nor child)
if "creating new AWS credential provider" in build_output:
print("Debug output:")
print(build_output)
raise Exception("Should NOT create credential provider for public S3 URL in fixed-output derivation")

print(" ✓ No credential provider created in parent or child process")

# Verify no credential pre-resolution happened
# (public buckets should skip this entirely, unlike private buckets)
if "Pre-resolving AWS credentials" in build_output:
print("Debug output:")
print(build_output)
raise Exception("Should not attempt credential pre-resolution for public buckets")

if "Using pre-resolved AWS credentials from parent process" in build_output:
print("Debug output:")
print(build_output)
raise Exception("Should not have pre-resolved credentials to use for public buckets")

print(" ✓ No credential pre-resolution attempted (public bucket optimization)")
print("\n ✓ import <nix/fetchurl.nix> works with public S3 URL")
print(" ✓ Fork + build path correctly skips all credential operations")

@setup_s3(populate_bucket=[PKGS['A']])
def test_url_format_variations(bucket):
@@ -787,6 +911,7 @@
test_fork_credential_preresolution()
test_store_operations()
test_public_bucket_operations()
test_fetchurl_public_bucket()
test_url_format_variations()
test_concurrent_fetches()
test_compression_narinfo_gzip()