diff --git a/.eslintrc.json b/.eslintrc.json index 7e5a1dd078..44510807ab 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -12,6 +12,7 @@ "node/no-unsupported-features/es-syntax": ["off"] }, "parserOptions": { + "ecmaVersion": 2020, "sourceType": "module" } } diff --git a/auth/.eslintrc.json b/auth/.eslintrc.json new file mode 100644 index 0000000000..95f214a816 --- /dev/null +++ b/auth/.eslintrc.json @@ -0,0 +1,6 @@ +{ + "extends": "../.eslintrc.json", + "rules": { + "no-unused-vars": "off" + } +} diff --git a/auth/README.md b/auth/README.md index 636015860f..15d899867d 100644 --- a/auth/README.md +++ b/auth/README.md @@ -64,4 +64,4 @@ information](https://developers.google.com/identity/protocols/application-defaul For more information on downscoped credentials you can visit: -> https://github.com/googleapis/google-auth-library-nodejs \ No newline at end of file +> https://github.com/googleapis/google-auth-library-nodejs diff --git a/auth/customcredentials/aws/Dockerfile b/auth/customcredentials/aws/Dockerfile new file mode 100644 index 0000000000..7f8e9cc0b3 --- /dev/null +++ b/auth/customcredentials/aws/Dockerfile @@ -0,0 +1,15 @@ +FROM node:20-slim + +WORKDIR /app + +COPY package*.json ./ + +RUN npm install --omit=dev + +RUN useradd -m appuser + +COPY --chown=appuser:appuser . . + +USER appuser + +CMD [ "node", "customCredentialSupplierAws.js" ] diff --git a/auth/customcredentials/aws/README.md b/auth/customcredentials/aws/README.md new file mode 100644 index 0000000000..8d5669ea1f --- /dev/null +++ b/auth/customcredentials/aws/README.md @@ -0,0 +1,121 @@ +# Running the Custom AWS Credential Supplier Sample (Node.js) + +This sample demonstrates how to use a custom AWS security credential supplier to authenticate with Google Cloud using AWS as an external identity provider. It uses the **AWS SDK for JavaScript (v3)** to fetch credentials from sources like Amazon Elastic Kubernetes Service (EKS) with IAM Roles for Service Accounts (IRSA), Elastic Container Service (ECS), or Fargate. + +## Prerequisites + +* An AWS account. +* A Google Cloud project with the IAM API enabled. +* A GCS bucket. +* **Node.js 16** or later installed. +* **npm** installed. + +If you want to use AWS security credentials that cannot be retrieved using methods supported natively by the Google Auth library, a custom `AwsSecurityCredentialsSupplier` implementation may be specified. The supplier must return valid, unexpired AWS security credentials when called by the Google Cloud Auth library. + +## Running Locally + +For local development, you can provide credentials and configuration in a JSON file. + +### Install Dependencies + +Ensure you have Node.js installed, then install the required libraries: + +```bash +npm install +``` + +### Configure Credentials for Local Development + +1. Copy the example secrets file to a new file named `custom-credentials-aws-secrets.json` in the project root: + ```bash + cp custom-credentials-aws-secrets.json.example custom-credentials-aws-secrets.json + ``` +2. Open `custom-credentials-aws-secrets.json` and fill in the required values for your AWS and Google Cloud configuration. Do not check your `custom-credentials-aws-secrets.json` file into version control. + + +### Run the Application + +Execute the script using node: + +```bash +node customCredentialSupplierAws.js +``` + +When run locally, the application will detect the `custom-credentials-aws-secrets.json` file and use it to configure the necessary environment variables for the AWS SDK. 
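+
+### How the Custom Supplier Is Wired
+
+Any object that implements `getAwsRegion()` and `getAwsSecurityCredentials()` can be passed to `AwsClient` as its `aws_security_credentials_supplier`. The trimmed sketch below shows that wiring with hard-coded placeholder values; the sample's `CustomAwsSupplier` in [`customCredentialSupplierAws.js`](customCredentialSupplierAws.js) resolves the region and credentials dynamically through the AWS SDK's default provider chain instead.
+
+```js
+const {AwsClient} = require('google-auth-library');
+
+// Illustrative supplier only: real code should never hard-code credentials.
+const supplier = {
+  async getAwsRegion() {
+    return 'us-east-1'; // placeholder region
+  },
+  async getAwsSecurityCredentials() {
+    return {
+      accessKeyId: 'YOUR_AWS_ACCESS_KEY_ID', // placeholder credentials
+      secretAccessKey: 'YOUR_AWS_SECRET_ACCESS_KEY',
+      token: undefined, // optional session token
+    };
+  },
+};
+
+const authClient = new AwsClient({
+  audience: 'YOUR_GCP_WORKLOAD_AUDIENCE',
+  subject_token_type: 'urn:ietf:params:aws:token-type:aws4_request',
+  aws_security_credentials_supplier: supplier,
+});
+```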
+ +## Running in a Containerized Environment (EKS) + +This section provides a brief overview of how to run the sample in an Amazon EKS cluster. + +### EKS Cluster Setup + +First, you need an EKS cluster. You can create one using `eksctl` or the AWS Management Console. For detailed instructions, refer to the [Amazon EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). + +### Configure IAM Roles for Service Accounts (IRSA) + +IRSA enables you to associate an IAM role with a Kubernetes service account. This provides a secure way for your pods to access AWS services without hardcoding long-lived credentials. + +Run the following command to create the IAM role and bind it to a Kubernetes Service Account: + +```bash +eksctl create iamserviceaccount \ + --name your-k8s-service-account \ + --namespace default \ + --cluster your-cluster-name \ + --region your-aws-region \ + --role-name your-role-name \ + --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess \ + --approve +``` + +> **Note**: The `--attach-policy-arn` flag is used here to demonstrate attaching permissions. Update this with the specific AWS policy ARN your application requires. + +For a deep dive into how this works without using `eksctl`, refer to the [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) documentation. + +### Configure Google Cloud to Trust the AWS Role + +To allow your AWS role to authenticate as a Google Cloud service account, you need to configure Workload Identity Federation. This process involves these key steps: + +1. **Create a Workload Identity Pool and an AWS Provider:** The pool holds the configuration, and the provider is set up to trust your AWS account. + +2. **Create or select a Google Cloud Service Account:** This service account will be impersonated by your AWS role. + +3. **Bind the AWS Role to the Google Cloud Service Account:** Create an IAM policy binding that gives your AWS role the `Workload Identity User` (`roles/iam.workloadIdentityUser`) role on the Google Cloud service account. + +For more detailed information, see the documentation on [Configuring Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds). + +### Containerize and Package the Application + +Create a `Dockerfile` for the Node.js application and push the image to a container registry (for example Amazon ECR) that your EKS cluster can access. + +**Note:** The provided [`Dockerfile`](Dockerfile) is an example and may need modification for your specific needs. + +Build and push the image: +```bash +docker build -t your-container-image:latest . +docker push your-container-image:latest +``` + +### Deploy to EKS + +Create a Kubernetes deployment manifest to deploy your application to the EKS cluster. See the [`pod.yaml`](pod.yaml) file for an example. + +**Note:** The provided [`pod.yaml`](pod.yaml) is an example and may need to be modified for your specific needs. + +Deploy the pod: + +```bash +kubectl apply -f pod.yaml +``` + +### Clean Up + +To clean up the resources, delete the EKS cluster and any other AWS and Google Cloud resources you created. + +```bash +eksctl delete cluster --name your-cluster-name +``` + +## Testing + +This sample is not continuously tested. It is provided for instructional purposes and may require modifications to work in your environment. 
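+
+One quick manual check before deploying is to confirm that the AWS SDK's default provider chain can resolve credentials and a region in the environment where the sample will run. The snippet below is a hypothetical check script, not part of the sample; it assumes the same `@aws-sdk/credential-providers` and `@aws-sdk/client-sts` packages listed in `package.json`:
+
+```js
+const {fromNodeProviderChain} = require('@aws-sdk/credential-providers');
+const {STSClient} = require('@aws-sdk/client-sts');
+
+async function check() {
+  // Resolve credentials the same way CustomAwsSupplier does.
+  const credentials = await fromNodeProviderChain()();
+  // Resolve the region from the default AWS region provider chain.
+  const region = await new STSClient({}).config.region();
+
+  console.log('Resolved region:', region);
+  console.log('Access key id present:', Boolean(credentials.accessKeyId));
+}
+
+check().catch(err => {
+  console.error('AWS credential resolution failed:', err.message);
+  process.exitCode = 1;
+});
+```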
diff --git a/auth/customcredentials/aws/custom-credentials-aws-secrets.json.example b/auth/customcredentials/aws/custom-credentials-aws-secrets.json.example new file mode 100644 index 0000000000..300dc70c13 --- /dev/null +++ b/auth/customcredentials/aws/custom-credentials-aws-secrets.json.example @@ -0,0 +1,8 @@ +{ + "aws_access_key_id": "YOUR_AWS_ACCESS_KEY_ID", + "aws_secret_access_key": "YOUR_AWS_SECRET_ACCESS_KEY", + "aws_region": "YOUR_AWS_REGION", + "gcp_workload_audience": "YOUR_GCP_WORKLOAD_AUDIENCE", + "gcs_bucket_name": "YOUR_GCS_BUCKET_NAME", + "gcp_service_account_impersonation_url": "YOUR_GCP_SERVICE_ACCOUNT_IMPERSONATION_URL" +} diff --git a/auth/customcredentials/aws/customCredentialSupplierAws.js b/auth/customcredentials/aws/customCredentialSupplierAws.js new file mode 100644 index 0000000000..c8f136bcf0 --- /dev/null +++ b/auth/customcredentials/aws/customCredentialSupplierAws.js @@ -0,0 +1,184 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// [START auth_custom_credential_supplier_aws] +const {AwsClient} = require('google-auth-library'); +const {fromNodeProviderChain} = require('@aws-sdk/credential-providers'); +const fs = require('fs'); +const path = require('path'); +const {STSClient} = require('@aws-sdk/client-sts'); +const {Storage} = require('@google-cloud/storage'); + +/** + * Custom AWS Security Credentials Supplier. + * + * This implementation resolves AWS credentials using the default Node provider + * chain from the AWS SDK. This allows fetching credentials from environment + * variables, shared credential files (~/.aws/credentials), or IAM roles + * for service accounts (IRSA) in EKS, etc. + */ +class CustomAwsSupplier { + constructor() { + this.region = null; + + this.awsCredentialsProvider = fromNodeProviderChain(); + } + + /** + * Returns the AWS region. This is required for signing the AWS request. + * It resolves the region automatically by using the default AWS region + * provider chain, which searches for the region in the standard locations + * (environment variables, AWS config file, etc.). + */ + async getAwsRegion(_context) { + if (this.region) { + return this.region; + } + + const client = new STSClient({}); + this.region = await client.config.region(); + + if (!this.region) { + throw new Error( + 'CustomAwsSupplier: Unable to resolve AWS region. Please set the AWS_REGION environment variable or configure it in your ~/.aws/config file.' + ); + } + + return this.region; + } + + /** + * Retrieves AWS security credentials using the AWS SDK's default provider chain. + */ + async getAwsSecurityCredentials(_context) { + const awsCredentials = await this.awsCredentialsProvider(); + + if (!awsCredentials.accessKeyId || !awsCredentials.secretAccessKey) { + throw new Error( + 'Unable to resolve AWS credentials from the node provider chain. ' + + 'Ensure your AWS CLI is configured, or AWS environment variables (like AWS_ACCESS_KEY_ID) are set.' 
+ ); + } + + return { + accessKeyId: awsCredentials.accessKeyId, + secretAccessKey: awsCredentials.secretAccessKey, + token: awsCredentials.sessionToken, + }; + } +} + +/** + * Authenticates with Google Cloud using AWS credentials and retrieves bucket metadata. + * + * @param {string} bucketName The name of the bucket to retrieve. + * @param {string} audience The Workload Identity Pool audience. + * @param {string} [impersonationUrl] Optional Service Account impersonation URL. + */ +async function authenticateWithAwsCredentials( + bucketName, + audience, + impersonationUrl +) { + const customSupplier = new CustomAwsSupplier(); + + const clientOptions = { + audience: audience, + subject_token_type: 'urn:ietf:params:aws:token-type:aws4_request', + service_account_impersonation_url: impersonationUrl, + aws_security_credentials_supplier: customSupplier, + }; + + const authClient = new AwsClient(clientOptions); + + const storage = new Storage({ + authClient: authClient, + }); + + const [metadata] = await storage.bucket(bucketName).getMetadata(); + return metadata; +} +// [END auth_custom_credential_supplier_aws] + +/** + * If a local secrets file is present, load it into the process environment. + * This is a "just-in-time" configuration for local development. These + * variables are only set for the current process. + */ +function loadConfigFromFile() { + const secretsPath = path.resolve( + __dirname, + 'custom-credentials-aws-secrets.json' + ); + if (!fs.existsSync(secretsPath)) return; + + try { + const secrets = JSON.parse(fs.readFileSync(secretsPath, 'utf8')); + + const envMap = { + aws_access_key_id: 'AWS_ACCESS_KEY_ID', + aws_secret_access_key: 'AWS_SECRET_ACCESS_KEY', + aws_region: 'AWS_REGION', + gcp_workload_audience: 'GCP_WORKLOAD_AUDIENCE', + gcs_bucket_name: 'GCS_BUCKET_NAME', + gcp_service_account_impersonation_url: + 'GCP_SERVICE_ACCOUNT_IMPERSONATION_URL', + }; + + for (const [jsonKey, envKey] of Object.entries(envMap)) { + if (secrets[jsonKey]) { + process.env[envKey] = secrets[jsonKey]; + } + } + } catch (error) { + console.error(`Error reading secrets file: ${error.message}`); + } +} + +async function main() { + loadConfigFromFile(); + + const gcpAudience = process.env.GCP_WORKLOAD_AUDIENCE; + const saImpersonationUrl = process.env.GCP_SERVICE_ACCOUNT_IMPERSONATION_URL; + const gcsBucketName = process.env.GCS_BUCKET_NAME; + + if (!gcpAudience || !gcsBucketName) { + throw new Error( + 'Missing required configuration. Please provide it in a ' + + 'secrets.json file or as environment variables: ' + + 'GCP_WORKLOAD_AUDIENCE, GCS_BUCKET_NAME' + ); + } + + try { + console.log(`Retrieving metadata for bucket: ${gcsBucketName}...`); + const bucketMetadata = await authenticateWithAwsCredentials( + gcsBucketName, + gcpAudience, + saImpersonationUrl + ); + console.log('\n--- SUCCESS! ---'); + console.log('Bucket Metadata:', JSON.stringify(bucketMetadata, null, 2)); + } catch (error) { + console.error('\n--- FAILED ---'); + console.error(error.message || error); + process.exitCode = 1; + } +} + +if (require.main === module) { + main(); +} + +exports.authenticateWithAwsCredentials = authenticateWithAwsCredentials; diff --git a/auth/customcredentials/aws/pod.yaml b/auth/customcredentials/aws/pod.yaml new file mode 100644 index 0000000000..20ca4bf710 --- /dev/null +++ b/auth/customcredentials/aws/pod.yaml @@ -0,0 +1,44 @@ +# Copyright 2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Pod +metadata: + name: custom-credential-pod-node +spec: + # The Kubernetes Service Account that is annotated with the corresponding + # AWS IAM role ARN. See the README for instructions on setting up IAM + # Roles for Service Accounts (IRSA). + serviceAccountName: your-k8s-service-account + containers: + - name: gcp-auth-sample-node + # The container image pushed to the container registry + # For example, Amazon Elastic Container Registry + image: your-container-image:latest + env: + # REQUIRED: The AWS region. The AWS SDK for Node.js requires this + # to be set explicitly in containers. + - name: AWS_REGION + value: "your-aws-region" + + # REQUIRED: The full identifier of the Workload Identity Pool provider + - name: GCP_WORKLOAD_AUDIENCE + value: "your-gcp-workload-audience" + + # OPTIONAL: Enable Google Cloud service account impersonation + # - name: GCP_SERVICE_ACCOUNT_IMPERSONATION_URL + # value: "your-gcp-service-account-impersonation-url" + + # REQUIRED: The bucket to list + - name: GCS_BUCKET_NAME + value: "your-gcs-bucket-name" diff --git a/auth/customcredentials/okta/README.md b/auth/customcredentials/okta/README.md new file mode 100644 index 0000000000..daca8c1e03 --- /dev/null +++ b/auth/customcredentials/okta/README.md @@ -0,0 +1,81 @@ +# Running the Custom Okta Credential Supplier Sample (Node.js) + +This sample demonstrates how to use a custom subject token supplier to authenticate with Google Cloud using Okta as an external identity provider. It uses the Client Credentials flow for machine-to-machine (M2M) authentication. + +## Prerequisites + +* An Okta developer account. +* A Google Cloud project with the IAM API enabled. +* A Google Cloud Storage bucket. Ensure that the authenticated user has access to this bucket. +* **Node.js 16** or later installed. +* **npm** installed. + +## Okta Configuration + +Before running the sample, you need to configure an Okta application for Machine-to-Machine (M2M) communication. + +### Create an M2M Application in Okta + +1. Log in to your Okta developer console. +2. Navigate to **Applications** > **Applications** and click **Create App Integration**. +3. Select **API Services** as the sign-on method and click **Next**. +4. Give your application a name and click **Save**. + +### Obtain Okta Credentials + +Once the application is created, you will find the following information in the **General** tab: + +* **Okta Domain**: Your Okta developer domain (e.g., `https://dev-123456.okta.com`). +* **Client ID**: The client ID for your application. +* **Client Secret**: The client secret for your application. + +You will need these values to configure the sample. + +## Google Cloud Configuration + +You need to configure a Workload Identity Pool in Google Cloud to trust the Okta application. + +### Set up Workload Identity Federation + +1. In the Google Cloud Console, navigate to **IAM & Admin** > **Workload Identity Federation**. +2. Click **Create Pool** to create a new Workload Identity Pool. +3. Add a new **OIDC provider** to the pool. +4. Configure the provider with your Okta domain as the issuer URL. +5. 
Map the Okta `sub` (subject) assertion to a GCP principal. + +For detailed instructions, refer to the [Workload Identity Federation documentation](https://cloud.google.com/iam/docs/workload-identity-federation). + +## Running the Sample + +To run the sample on your local system, you need to install dependencies and configure your credentials. + +### 1. Install Dependencies + +This command downloads all required Node.js libraries. + +```bash +npm install +``` + +### 2. Configure Credentials for Local Development + +1. Copy the example secrets file to a new file named `custom-credentials-okta-secrets.json` in the project root: + ```bash + cp custom-credentials-okta-secrets.json.example custom-credentials-okta-secrets.json + ``` +2. Open `custom-credentials-okta-secrets.json` and fill in the required values for your AWS and Google Cloud configuration. Do not check your `custom-credentials-okta-secrets.json` file into version control. + + +### 3. Run the Application + +Execute the script using Node.js: + +```bash +node customCredentialSupplierOkta.js +``` + +The script authenticates with Okta to get an OIDC token, exchanges that token for a Google Cloud federated token, and uses it to list metadata for the specified Google Cloud Storage bucket. + +## Testing + +This sample is not continuously tested. It is provided for instructional purposes and may require modifications to work in your environment. diff --git a/auth/customcredentials/okta/custom-credentials-okta-secrets.json.example b/auth/customcredentials/okta/custom-credentials-okta-secrets.json.example new file mode 100644 index 0000000000..fa04fda7cb --- /dev/null +++ b/auth/customcredentials/okta/custom-credentials-okta-secrets.json.example @@ -0,0 +1,8 @@ +{ + "okta_domain": "https://your-okta-domain.okta.com", + "okta_client_id": "your-okta-client-id", + "okta_client_secret": "your-okta-client-secret", + "gcp_workload_audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/my-pool/providers/my-provider", + "gcs_bucket_name": "your-gcs-bucket-name", + "gcp_service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/my-service-account@my-project.iam.gserviceaccount.com:generateAccessToken" +} diff --git a/auth/customcredentials/okta/customCredentialSupplierOkta.js b/auth/customcredentials/okta/customCredentialSupplierOkta.js new file mode 100644 index 0000000000..b4ed10b654 --- /dev/null +++ b/auth/customcredentials/okta/customCredentialSupplierOkta.js @@ -0,0 +1,220 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// [START auth_custom_credential_supplier_okta] +const {IdentityPoolClient} = require('google-auth-library'); +const {Storage} = require('@google-cloud/storage'); +const {Gaxios} = require('gaxios'); +const fs = require('fs'); +const path = require('path'); + +/** + * A custom SubjectTokenSupplier that authenticates with Okta using the + * Client Credentials grant flow. 
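+ *
+ * The Okta access token is cached on the supplier instance and reused until
+ * roughly 60 seconds before it expires, at which point a fresh token is
+ * requested from Okta.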
+ */ +class OktaClientCredentialsSupplier { + constructor(domain, clientId, clientSecret) { + const cleanDomain = domain.endsWith('/') ? domain.slice(0, -1) : domain; + this.oktaTokenUrl = `${cleanDomain}/oauth2/default/v1/token`; + + this.clientId = clientId; + this.clientSecret = clientSecret; + this.accessToken = null; + this.expiryTime = 0; + this.gaxios = new Gaxios(); + } + + /** + * Main method called by the auth library. It will fetch a new token if one + * is not already cached. + * @returns {Promise} A promise that resolves with the Okta Access token. + */ + async getSubjectToken() { + const isTokenValid = + this.accessToken && Date.now() < this.expiryTime - 60 * 1000; + + if (isTokenValid) { + return this.accessToken; + } + + const {accessToken, expiresIn} = await this.fetchOktaAccessToken(); + this.accessToken = accessToken; + this.expiryTime = Date.now() + expiresIn * 1000; + return this.accessToken; + } + + /** + * Performs the Client Credentials grant flow with Okta. + */ + async fetchOktaAccessToken() { + const params = new URLSearchParams(); + params.append('grant_type', 'client_credentials'); + params.append('scope', 'gcp.test.read'); + + const authHeader = + 'Basic ' + + Buffer.from(`${this.clientId}:${this.clientSecret}`).toString('base64'); + + try { + const response = await this.gaxios.request({ + url: this.oktaTokenUrl, + method: 'POST', + headers: { + Authorization: authHeader, + 'Content-Type': 'application/x-www-form-urlencoded', + }, + data: params.toString(), + }); + + const {access_token, expires_in} = response.data; + if (access_token && expires_in) { + return {accessToken: access_token, expiresIn: expires_in}; + } else { + throw new Error( + 'Access token or expires_in not found in Okta response.' + ); + } + } catch (error) { + throw new Error( + `Failed to authenticate with Okta: ${error.response?.data || error.message}` + ); + } + } +} + +/** + * Authenticates with Google Cloud using Okta credentials and retrieves bucket metadata. + * + * @param {string} bucketName The name of the bucket to retrieve. + * @param {string} audience The Workload Identity Pool audience. + * @param {string} domain The Okta domain. + * @param {string} clientId The Okta client ID. + * @param {string} clientSecret The Okta client secret. + * @param {string} [impersonationUrl] Optional Service Account impersonation URL. + */ +async function authenticateWithOktaCredentials( + bucketName, + audience, + domain, + clientId, + clientSecret, + impersonationUrl +) { + const oktaSupplier = new OktaClientCredentialsSupplier( + domain, + clientId, + clientSecret + ); + + const authClient = new IdentityPoolClient({ + audience: audience, + subject_token_type: 'urn:ietf:params:oauth:token-type:jwt', + token_url: 'https://sts.googleapis.com/v1/token', + subject_token_supplier: oktaSupplier, + service_account_impersonation_url: impersonationUrl, + }); + + const storage = new Storage({ + authClient: authClient, + }); + + const [metadata] = await storage.bucket(bucketName).getMetadata(); + return metadata; +} +// [END auth_custom_credential_supplier_okta] + +/** + * If a local secrets file is present, load it into the process environment. + * This is a "just-in-time" configuration for local development. These + * variables are only set for the current process. 
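+ * Values found in the secrets file take precedence over variables that are
+ * already set in the environment.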
+ */ +function loadConfigFromFile() { + const secretsPath = path.resolve( + __dirname, + 'custom-credentials-okta-secrets.json' + ); + if (!fs.existsSync(secretsPath)) return; + + try { + const secrets = JSON.parse(fs.readFileSync(secretsPath, 'utf8')); + + // Define the mapping: JSON Key -> Environment Variable + const envMap = { + gcp_workload_audience: 'GCP_WORKLOAD_AUDIENCE', + gcs_bucket_name: 'GCS_BUCKET_NAME', + gcp_service_account_impersonation_url: + 'GCP_SERVICE_ACCOUNT_IMPERSONATION_URL', + okta_domain: 'OKTA_DOMAIN', + okta_client_id: 'OKTA_CLIENT_ID', + okta_client_secret: 'OKTA_CLIENT_SECRET', + }; + + // Iterate and assign + for (const [jsonKey, envKey] of Object.entries(envMap)) { + if (secrets[jsonKey]) { + process.env[envKey] = secrets[jsonKey]; + } + } + } catch (error) { + console.error(`Error reading secrets file: ${error.message}`); + } +} + +loadConfigFromFile(); + +async function main() { + const gcpAudience = process.env.GCP_WORKLOAD_AUDIENCE; + const saImpersonationUrl = process.env.GCP_SERVICE_ACCOUNT_IMPERSONATION_URL; + const gcsBucketName = process.env.GCS_BUCKET_NAME; + const oktaDomain = process.env.OKTA_DOMAIN; + const oktaClientId = process.env.OKTA_CLIENT_ID; + const oktaClientSecret = process.env.OKTA_CLIENT_SECRET; + + if ( + !gcpAudience || + !gcsBucketName || + !oktaDomain || + !oktaClientId || + !oktaClientSecret + ) { + throw new Error( + 'Missing required configuration. Please provide it in a ' + + 'secrets.json file or as environment variables.' + ); + } + + try { + console.log(`Retrieving metadata for bucket: ${gcsBucketName}...`); + const bucketMetadata = await authenticateWithOktaCredentials( + gcsBucketName, + gcpAudience, + oktaDomain, + oktaClientId, + oktaClientSecret, + saImpersonationUrl + ); + console.log('\n--- SUCCESS! 
---'); + console.log('Bucket Metadata:', JSON.stringify(bucketMetadata, null, 2)); + } catch (error) { + console.error('\n--- FAILED ---'); + console.error(error.message || error); + process.exitCode = 1; + } +} + +if (require.main === module) { + main(); +} + +exports.authenticateWithOktaCredentials = authenticateWithOktaCredentials; diff --git a/auth/package.json b/auth/package.json index 71988b2b5f..00c59e7b65 100644 --- a/auth/package.json +++ b/auth/package.json @@ -15,12 +15,17 @@ "test:auth": "c8 mocha -p -j 2 system-test/auth.test.js --timeout=30000", "test:downscoping": "c8 mocha -p -j 2 system-test/downscoping.test.js --timeout=30000", "test:accessTokenFromImpersonatedCredentials": "c8 mocha -p -j 2 system-test/accessTokenFromImpersonatedCredentials.test.js --timeout=30000", + "test:customcredentials": "c8 mocha -p -j 2 \"system-test/customcredentials/**/*.test.js\" --timeout=30000", "test": "npm -- run system-test", - "system-test": "c8 mocha -p -j 2 system-test/*.test.js --timeout=30000" + "system-test": "c8 mocha -p -j 2 \"system-test/**/*.test.js\" --timeout=30000" }, "dependencies": { - "@google-cloud/storage": "^7.0.0", + "@aws-sdk/client-sts": "^3.58.0", + "@aws-sdk/credential-providers": "^3.0.0", + "@google-cloud/storage": "^7.18.0", + "dotenv": "^17.0.0", "fix": "0.0.6", + "gaxios": "^6.0.0", "google-auth-library": "^9.0.0", "yargs": "^17.0.0" }, diff --git a/auth/system-test/customcredentials/aws/customCredentialSupplierAws.test.js b/auth/system-test/customcredentials/aws/customCredentialSupplierAws.test.js new file mode 100644 index 0000000000..5a3993828b --- /dev/null +++ b/auth/system-test/customcredentials/aws/customCredentialSupplierAws.test.js @@ -0,0 +1,113 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
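+
+// System test for the custom AWS credential supplier sample. When a local
+// custom-credentials-aws-secrets.json file is present, its values are loaded
+// into the environment before the test runs; the test is skipped if required
+// configuration is missing, and the original environment is restored afterwards.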
+ +'use strict'; + +const assert = require('assert'); +const fs = require('fs'); +const path = require('path'); +const { + authenticateWithAwsCredentials, +} = require('../../../customcredentials/aws/customCredentialSupplierAws.js'); + +describe('Custom Credential Supplier AWS', () => { + // Variables to hold the original environment to restore after tests + const originalEnv = {}; + + // The configuration we need to run the test + let bucketName, audience, impersonationUrl; + + before(function () { + const secretsPath = path.resolve( + __dirname, + '../../../customcredentials/aws/custom-credentials-aws-secrets.json' + ); + + if (fs.existsSync(secretsPath)) { + try { + const content = fs.readFileSync(secretsPath, 'utf8'); + const secrets = JSON.parse(content); + + // Helper to safely set env var if it exists in the JSON + const setEnv = (envKey, jsonKey) => { + if (secrets[jsonKey]) { + // Save original value to restore later + if (process.env[envKey] === undefined) { + originalEnv[envKey] = undefined; // Mark that it was undefined + } else if ( + !Object.prototype.hasOwnProperty.call(originalEnv, envKey) + ) { + originalEnv[envKey] = process.env[envKey]; + } + process.env[envKey] = secrets[jsonKey]; + } + }; + + // Map JSON keys to Environment Variables + setEnv('GCP_WORKLOAD_AUDIENCE', 'gcp_workload_audience'); + setEnv('GCS_BUCKET_NAME', 'gcs_bucket_name'); + setEnv( + 'GCP_SERVICE_ACCOUNT_IMPERSONATION_URL', + 'gcp_service_account_impersonation_url' + ); + setEnv('AWS_ACCESS_KEY_ID', 'aws_access_key_id'); + setEnv('AWS_SECRET_ACCESS_KEY', 'aws_secret_access_key'); + setEnv('AWS_REGION', 'aws_region'); + } catch (err) { + console.warn( + 'Failed to parse secrets file, relying on system env vars.', + err + ); + } + } + + // Extract values from the Environment (whether from file or system) + bucketName = process.env.GCS_BUCKET_NAME; + audience = process.env.GCP_WORKLOAD_AUDIENCE; + impersonationUrl = process.env.GCP_SERVICE_ACCOUNT_IMPERSONATION_URL; + const awsKey = process.env.AWS_ACCESS_KEY_ID; + const awsSecret = process.env.AWS_SECRET_ACCESS_KEY; + const awsRegion = process.env.AWS_REGION; + + // Skip test if requirements are missing (mimics Java assumeTrue) + if (!bucketName || !audience || !awsKey || !awsSecret || !awsRegion) { + console.log('Skipping AWS system test: Required configuration missing.'); + this.skip(); + } + }); + + after(() => { + // Restore environment variables to their original state + for (const key in originalEnv) { + if (originalEnv[key] === undefined) { + delete process.env[key]; + } else { + process.env[key] = originalEnv[key]; + } + } + }); + + it('should authenticate using AWS credentials', async () => { + // Act + const metadata = await authenticateWithAwsCredentials( + bucketName, + audience, + impersonationUrl + ); + + // Assert + assert.strictEqual(metadata.name, bucketName); + assert.ok(metadata.location); + }); +}); diff --git a/auth/system-test/customcredentials/okta/customCredentialSupplierOkta.test.js b/auth/system-test/customcredentials/okta/customCredentialSupplierOkta.test.js new file mode 100644 index 0000000000..1156a0b094 --- /dev/null +++ b/auth/system-test/customcredentials/okta/customCredentialSupplierOkta.test.js @@ -0,0 +1,116 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const assert = require('assert'); +const fs = require('fs'); +const path = require('path'); +const { + authenticateWithOktaCredentials, +} = require('../../../customcredentials/okta/customCredentialSupplierOkta.js'); + +describe('Custom Credential Supplier Okta', () => { + const originalEnv = {}; + let bucketName, + audience, + impersonationUrl, + oktaDomain, + oktaClientId, + oktaClientSecret; + + before(function () { + const secretsPath = path.resolve( + __dirname, + '../../../customcredentials/okta/custom-credentials-okta-secrets.json' + ); + + if (fs.existsSync(secretsPath)) { + try { + const content = fs.readFileSync(secretsPath, 'utf8'); + const secrets = JSON.parse(content); + + const setEnv = (envKey, jsonKey) => { + if (secrets[jsonKey]) { + if (process.env[envKey] === undefined) { + originalEnv[envKey] = undefined; + } else if ( + !Object.prototype.hasOwnProperty.call(originalEnv, envKey) + ) { + originalEnv[envKey] = process.env[envKey]; + } + process.env[envKey] = secrets[jsonKey]; + } + }; + + setEnv('GCP_WORKLOAD_AUDIENCE', 'gcp_workload_audience'); + setEnv('GCS_BUCKET_NAME', 'gcs_bucket_name'); + setEnv( + 'GCP_SERVICE_ACCOUNT_IMPERSONATION_URL', + 'gcp_service_account_impersonation_url' + ); + setEnv('OKTA_DOMAIN', 'okta_domain'); + setEnv('OKTA_CLIENT_ID', 'okta_client_id'); + setEnv('OKTA_CLIENT_SECRET', 'okta_client_secret'); + } catch (err) { + console.warn( + 'Failed to parse secrets file, relying on system env vars.', + err + ); + } + } + + bucketName = process.env.GCS_BUCKET_NAME; + audience = process.env.GCP_WORKLOAD_AUDIENCE; + impersonationUrl = process.env.GCP_SERVICE_ACCOUNT_IMPERSONATION_URL; + oktaDomain = process.env.OKTA_DOMAIN; + oktaClientId = process.env.OKTA_CLIENT_ID; + oktaClientSecret = process.env.OKTA_CLIENT_SECRET; + + if ( + !bucketName || + !audience || + !oktaDomain || + !oktaClientId || + !oktaClientSecret + ) { + console.log('Skipping Okta system test: Required configuration missing.'); + this.skip(); + } + }); + + after(() => { + for (const key in originalEnv) { + if (originalEnv[key] === undefined) { + delete process.env[key]; + } else { + process.env[key] = originalEnv[key]; + } + } + }); + + it('should authenticate using Okta credentials', async () => { + const metadata = await authenticateWithOktaCredentials( + bucketName, + audience, + oktaDomain, + oktaClientId, + oktaClientSecret, + impersonationUrl + ); + + assert.strictEqual(metadata.name, bucketName); + assert.ok(metadata.location); + }); +});
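+
+// Both custom credential supplier system tests can be run from the auth/
+// directory with: npm run test:customcredentials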