diff --git a/.github/workflows/check-file-contents.yml b/.github/workflows/check-file-contents.yml index 1f31ac0732..861e2247a0 100644 --- a/.github/workflows/check-file-contents.yml +++ b/.github/workflows/check-file-contents.yml @@ -89,7 +89,7 @@ jobs: - name: Check for import from cli package in certain changed Python files run: | git fetch origin ${{ github.base_ref }} - CHANGED_FILES=$(git diff --diff-filter=ACMR --name-only origin/${{ github.base_ref }}...HEAD | grep -E '\.py$' | grep -v -E 'cli/.*|tests/.*|contributing/samples/' || true) + CHANGED_FILES=$(git diff --diff-filter=ACMR --name-only origin/${{ github.base_ref }}...HEAD | grep -E '\.py$' | grep -v -E 'cli/.*|src/google/adk/tools/apihub_tool/apihub_toolset.py|tests/.*|contributing/samples/' || true) if [ -n "$CHANGED_FILES" ]; then echo "Changed Python files to check:" echo "$CHANGED_FILES" diff --git a/AGENTS.md b/AGENTS.md index ae5614cd8a..b478d094e5 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -236,7 +236,7 @@ isort --check src/ ### In ADK source -Below styles applies to the ADK source code (under `src/` folder of the Github repo). +Below styles applies to the ADK source code (under `src/` folder of the GitHub repo). 
#### Use relative imports (Required) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a1b65936d..0b8286bc2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -440,7 +440,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 ### Improvements -* Add Github workflow config for the ADK Answering agent ([8dc0c94](https://github.com/google/adk-python/commit/8dc0c949afb9024738ff7ac1b2c19282175c3200)) +* Add GitHub workflow config for the ADK Answering agent ([8dc0c94](https://github.com/google/adk-python/commit/8dc0c949afb9024738ff7ac1b2c19282175c3200)) * Import AGENT_CARD_WELL_KNOWN_PATH from adk instead of from a2a directly ([37dae9b](https://github.com/google/adk-python/commit/37dae9b631db5060770b66fce0e25cf0ffb56948)) * Make `LlmRequest.LiveConnectConfig` field default to a factory ([74589a1](https://github.com/google/adk-python/commit/74589a1db7df65e319d1ad2f0676ee0cf5d6ec1d)) * Update the prompt to make the ADK Answering Agent more objective ([2833030](https://github.com/google/adk-python/commit/283303032a174d51b8d72f14df83c794d66cb605)) @@ -499,14 +499,13 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 ### Features * [Core]Add agent card builder ([18f5bea](https://github.com/google/adk-python/commit/18f5bea411b3b76474ff31bfb2f62742825b45e5)) -* [Core]Add an to_a2a util to convert adk agent to A2A ASGI application ([a77d689](https://github.com/google/adk-python/commit/a77d68964a1c6b7659d6117d57fa59e43399e0c2)) +* [Core]Add a to_a2a util to convert adk agent to A2A ASGI application ([a77d689](https://github.com/google/adk-python/commit/a77d68964a1c6b7659d6117d57fa59e43399e0c2)) * [Core]Add camel case converter for agents ([0e173d7](https://github.com/google/adk-python/commit/0e173d736334f8c6c171b3144ac6ee5b7125c846)) * [Evals]Use LocalEvalService to run all evals in cli and web ([d1f182e](https://github.com/google/adk-python/commit/d1f182e8e68c4a5a4141592f3f6d2ceeada78887)) * [Evals]Enable 
FinalResponseMatchV2 metric as an experiment ([36e45cd](https://github.com/google/adk-python/commit/36e45cdab3bbfb653eee3f9ed875b59bcd525ea1)) * [Models]Add support for `model-optimizer-*` family of models in vertex ([ffe2bdb](https://github.com/google/adk-python/commit/ffe2bdbe4c2ea86cc7924eb36e8e3bb5528c0016)) * [Services]Added a sample for History Management ([67284fc](https://github.com/google/adk-python/commit/67284fc46667b8c2946762bc9234a8453d48a43c)) -* [Services]Support passing fully qualified agent engine resource name when constructing session service and memory service ([2e77804](https://github.com/google/adk-python/commit/2e778049d0a675e458f4e -35fe4104ca1298dbfcf)) +* [Services]Support passing fully qualified agent engine resource name when constructing session service and memory service ([2e77804](https://github.com/google/adk-python/commit/2e778049d0a675e458f4e35fe4104ca1298dbfcf)) * [Tools]Add ComputerUseToolset ([083dcb4](https://github.com/google/adk-python/commit/083dcb44650eb0e6b70219ede731f2fa78ea7d28)) * [Tools]Allow toolset to process llm_request before tools returned by it ([3643b4a](https://github.com/google/adk-python/commit/3643b4ae196fd9e38e52d5dc9d1cd43ea0733d36)) * [Tools]Support input/output schema by fully-qualified code reference ([dfee06a](https://github.com/google/adk-python/commit/dfee06ac067ea909251d6fb016f8331065d430e9)) @@ -619,7 +618,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 ### Documentation -* Update the a2a exmaple link in README.md [d0fdfb8](https://github.com/google/adk-python/commit/d0fdfb8c8e2e32801999c81de8d8ed0be3f88e76) +* Update the a2a example link in README.md [d0fdfb8](https://github.com/google/adk-python/commit/d0fdfb8c8e2e32801999c81de8d8ed0be3f88e76) * Adds AGENTS.md to provide relevant project context for the Gemini CLI [37108be](https://github.com/google/adk-python/commit/37108be8557e011f321de76683835448213f8515) * Update CONTRIBUTING.md 
[ffa9b36](https://github.com/google/adk-python/commit/ffa9b361db615ae365ba62c09a8f4226fb761551) * Add adk project overview and architecture [28d0ea8](https://github.com/google/adk-python/commit/28d0ea876f2f8de952f1eccbc788e98e39f50cf5) @@ -814,7 +813,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 * Fix typos in README for sample bigquery_agent and oauth_calendar_agent ([9bdd813](https://github.com/google/adk-python/commit/9bdd813be15935af5c5d2a6982a2391a640cab23)) * Make tool_call one span for telemetry and renamed to execute_tool ([999a7fe](https://github.com/google/adk-python/commit/999a7fe69d511b1401b295d23ab3c2f40bccdc6f)) * Use media type in chat window. Remove isArtifactImage and isArtifactAudio reference ([1452dac](https://github.com/google/adk-python/commit/1452dacfeb6b9970284e1ddeee6c4f3cb56781f8)) -* Set output_schema correctly for LiteLllm ([6157db7](https://github.com/google/adk-python/commit/6157db77f2fba4a44d075b51c83bff844027a147)) +* Set output_schema correctly for LiteLlm ([6157db7](https://github.com/google/adk-python/commit/6157db77f2fba4a44d075b51c83bff844027a147)) * Update pending event dialog style ([1db601c](https://github.com/google/adk-python/commit/1db601c4bd90467b97a2f26fe9d90d665eb3c740)) * Remove the gap between event holder and image ([63822c3](https://github.com/google/adk-python/commit/63822c3fa8b0bdce2527bd0d909c038e2b66dd98)) @@ -842,7 +841,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 ## 1.1.1 ### Features -* Add BigQuery first-party tools. See [here](https://github.com/google/adk-python/commit/d6c6bb4b2489a8b7a4713e4747c30d6df0c07961) for more details. +* Add [BigQuery first-party tools](https://github.com/google/adk-python/commit/d6c6bb4b2489a8b7a4713e4747c30d6df0c07961). ## 1.1.0 @@ -978,7 +977,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 * Fix google search reading undefined for `renderedContent`. 
### Miscellaneous Chores -* Docstring improvements, typo fixings, github action to enfore code styles on formatting and imports, etc. +* Docstring improvements, typo fixings, github action to enforce code styles on formatting and imports, etc. ## 0.3.0 @@ -1017,7 +1016,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 ### ⚠ BREAKING CHANGES -* Fix typo in method name in `Event`: has_trailing_code_exeuction_result --> has_trailing_code_execution_result. +* Fix typo in method name in `Event`: has_trailing_code_execution_result --> has_trailing_code_execution_result. ### Features @@ -1047,7 +1046,7 @@ with Bigtable for building AI Agent applications(experimental feature) ([a953807 ### Miscellaneous Chores -* Adds unit tests in Github action. +* Adds unit tests in GitHub action. * Improves test coverage. * Various typo fixes. diff --git a/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py index 976cea1707..05517cd86e 100644 --- a/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py +++ b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py @@ -46,7 +46,7 @@ Use the provided tools to conduct various operations on users' data in Google BigQuery. Scenario 1: - The user wants to query their biguqery datasets + The user wants to query their bigquery datasets Use bigquery_datasets_list to query user's datasets Scenario 2: diff --git a/contributing/samples/a2a_human_in_loop/README.md b/contributing/samples/a2a_human_in_loop/README.md index 5f90fad9f8..d88d5f8f3c 100644 --- a/contributing/samples/a2a_human_in_loop/README.md +++ b/contributing/samples/a2a_human_in_loop/README.md @@ -99,7 +99,7 @@ Agent: ✅ Great news! Your reimbursement has been approved by the manager. Proc The human-in-the-loop process follows this pattern: 1. **Initial Call**: Root agent delegates approval request to remote approval agent for amounts >$100 -2. 
**Pending Response**: Remote approval agent returns immediate response with `status: "pending"` and ticket ID and serface the approval request to root agent +2. **Pending Response**: Remote approval agent returns immediate response with `status: "pending"` and ticket ID and surface the approval request to root agent 3. **Agent Acknowledgment**: Root agent informs user about pending approval status 4. **Human Interaction**: Human manager interacts with root agent to review and approve/reject the request 5. **Updated Response**: Root agent receives updated tool response with approval decision and send it to remote agent diff --git a/contributing/samples/adk_answering_agent/README.md b/contributing/samples/adk_answering_agent/README.md index 2941f29744..25694fad56 100644 --- a/contributing/samples/adk_answering_agent/README.md +++ b/contributing/samples/adk_answering_agent/README.md @@ -116,4 +116,4 @@ The following environment variables are required to upload the docs to update th * `ADK_DOCS_ROOT_PATH=YOUR_ADK_DOCS_ROOT_PATH`: **(Required)** Path to the root of the downloaded adk-docs repo. * `ADK_PYTHON_ROOT_PATH=YOUR_ADK_PYTHON_ROOT_PATH`: **(Required)** Path to the root of the downloaded adk-python repo. -For local execution in interactive mode, you can place these variables in a `.env` file in the project's root directory. For the GitHub workflow, they should be configured as repository secrets. \ No newline at end of file +For local execution in interactive mode, you can place these variables in a `.env` file in the project's root directory. For the GitHub workflow, they should be configured as repository secrets. 
diff --git a/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py b/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py index 945c317981..96fe6adf0a 100644 --- a/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py +++ b/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py @@ -130,7 +130,7 @@ def upload_directory_to_gcs( ) return False - print(f"Sucessfully uploaded {file_count} files to GCS.") + print(f"Successfully uploaded {file_count} files to GCS.") return True @@ -148,7 +148,7 @@ def import_from_gcs_to_vertex_ai( # parent has the format of # "projects/{project_number}/locations/{location}/collections/{collection}/dataStores/{datastore_id}/branches/default_branch" parent=full_datastore_id + "/branches/default_branch", - # Specify the GCS source and use "content" for unstructed data. + # Specify the GCS source and use "content" for unstructured data. gcs_source=discoveryengine.GcsSource( input_uris=[gcs_uri], data_schema="content" ), diff --git a/contributing/samples/adk_answering_agent/utils.py b/contributing/samples/adk_answering_agent/utils.py index d3d6c264c4..9393d9c85a 100644 --- a/contributing/samples/adk_answering_agent/utils.py +++ b/contributing/samples/adk_answering_agent/utils.py @@ -143,7 +143,7 @@ def convert_gcs_to_https(gcs_uri: str) -> Optional[str]: if _check_url_exists(potential_url): return potential_url else: - # If it doesn't exist, fallback to the regular github url + # If it doesn't exist, fall back to the regular github url return _generate_github_url(prefix, relative_path) # Convert the links for other cases, e.g. 
adk-python diff --git a/contributing/samples/adk_issue_formatting_agent/agent.py b/contributing/samples/adk_issue_formatting_agent/agent.py index 78add9b83b..d2f040ed56 100644 --- a/contributing/samples/adk_issue_formatting_agent/agent.py +++ b/contributing/samples/adk_issue_formatting_agent/agent.py @@ -45,7 +45,7 @@ def list_open_issues(issue_count: int) -> dict[str, Any]: - """List most recent `issue_count` numer of open issues in the repo. + """List most recent `issue_count` number of open issues in the repo. Args: issue_count: number of issues to return @@ -75,7 +75,7 @@ def get_issue(issue_number: int) -> dict[str, Any]: """Get the details of the specified issue number. Args: - issue_number: issue number of the Github issue. + issue_number: issue number of the GitHub issue. Returns: The status of this request, with the issue details when successful. @@ -92,7 +92,7 @@ def add_comment_to_issue(issue_number: int, comment: str) -> dict[str, any]: """Add the specified comment to the given issue number. Args: - issue_number: issue number of the Github issue + issue_number: issue number of the GitHub issue comment: comment to add Returns: @@ -116,7 +116,7 @@ def list_comments_on_issue(issue_number: int) -> dict[str, any]: """List all comments on the given issue number. Args: - issue_number: issue number of the Github issue + issue_number: issue number of the GitHub issue Returns: The the status of this request, with the list of comments when successful. diff --git a/contributing/samples/adk_pr_agent/agent.py b/contributing/samples/adk_pr_agent/agent.py index 8c398e7edd..7d6088ac45 100644 --- a/contributing/samples/adk_pr_agent/agent.py +++ b/contributing/samples/adk_pr_agent/agent.py @@ -125,7 +125,7 @@ def get_github_pr_info_http(pr_number: int) -> str | None: system_prompt = """ You are a helpful assistant to generate reasonable descriptions for pull requests for software engineers. 
-The descritions should not be too short (e.g.: less than 3 words), or too long (e.g.: more than 30 words). +The descriptions should not be too short (e.g.: less than 3 words), or too long (e.g.: more than 30 words). The generated description should start with `chore`, `docs`, `feat`, `fix`, `test`, or `refactor`. `feat` stands for a new feature. diff --git a/contributing/samples/adk_pr_triaging_agent/agent.py b/contributing/samples/adk_pr_triaging_agent/agent.py index 57aafaee6b..90f6b7cdc1 100644 --- a/contributing/samples/adk_pr_triaging_agent/agent.py +++ b/contributing/samples/adk_pr_triaging_agent/agent.py @@ -58,7 +58,7 @@ def get_pull_request_details(pr_number: int) -> str: """Get the details of the specified pull request. Args: - pr_number: number of the Github pull request. + pr_number: number of the GitHub pull request. Returns: The status of this request, with the details when successful. @@ -162,7 +162,7 @@ def add_label_to_pr(pr_number: int, label: str) -> dict[str, Any]: """Adds a specified label on a pull request. Args: - pr_number: the number of the Github pull request + pr_number: the number of the GitHub pull request label: the label to add Returns: @@ -175,7 +175,7 @@ def add_label_to_pr(pr_number: int, label: str) -> dict[str, Any]: f"Error: Label '{label}' is not an allowed label. Will not apply." ) - # Pull Request is a special issue in Github, so we can use issue url for PR. + # Pull Request is a special issue in GitHub, so we can use issue url for PR. label_url = ( f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{pr_number}/labels" ) @@ -197,7 +197,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: """Add the specified comment to the given PR number. 
Args: - pr_number: the number of the Github pull request + pr_number: the number of the GitHub pull request comment: the comment to add Returns: @@ -205,7 +205,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: """ print(f"Attempting to add comment '{comment}' to issue #{pr_number}") - # Pull Request is a special issue in Github, so we can use issue url for PR. + # Pull Request is a special issue in GitHub, so we can use issue url for PR. url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{pr_number}/comments" payload = {"body": comment} @@ -225,7 +225,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: description="Triage ADK pull requests.", instruction=f""" # 1. Identity - You are a Pull Request (PR) triaging bot for the Github {REPO} repo with the owner {OWNER}. + You are a Pull Request (PR) triaging bot for the GitHub {REPO} repo with the owner {OWNER}. # 2. Responsibilities Your core responsibility includes: @@ -242,7 +242,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: - If it's about session, memory, artifacts services, label it with "services" - If it's about UI/web, label it with "web" - If it's related to tools, label it with "tools" - - If it's about agent evalaution, then label it with "eval". + - If it's about agent evaluation, then label it with "eval". - If it's about streaming/live, label it with "live". - If it's about model support(non-Gemini, like Litellm, Ollama, OpenAI models), label it with "models". - If it's about tracing, label it with "tracing". @@ -280,7 +280,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: - Call the `get_pull_request_details` tool to get the details of the PR. - Skip the PR (i.e. do not label or comment) if any of the following is true: - the PR is closed - - the PR is labeled with "google-contributior" + - the PR is labeled with "google-contributor" - the PR is already labelled with the above labels (e.g. 
"documentation", "services", "tools", etc.). - Check if the PR is following the contribution guidelines. - If it's not following the guidelines, recommend or add a comment to the PR that points to the contribution guidelines (https://github.com/google/adk-python/blob/main/CONTRIBUTING.md). diff --git a/contributing/samples/adk_triaging_agent/agent.py b/contributing/samples/adk_triaging_agent/agent.py index e855b950cb..4f5679c969 100644 --- a/contributing/samples/adk_triaging_agent/agent.py +++ b/contributing/samples/adk_triaging_agent/agent.py @@ -50,7 +50,7 @@ def list_unlabeled_issues(issue_count: int) -> dict[str, Any]: - """List most recent `issue_count` numer of unlabeled issues in the repo. + """List most recent `issue_count` number of unlabeled issues in the repo. Args: issue_count: number of issues to return @@ -87,7 +87,7 @@ def add_label_and_owner_to_issue( """Add the specified label and owner to the given issue number. Args: - issue_number: issue number of the Github issue. + issue_number: issue number of the GitHub issue. label: label to assign Returns: @@ -143,7 +143,7 @@ def change_issue_type(issue_number: int, issue_type: str) -> dict[str, Any]: """Change the issue type of the given issue number. Args: - issue_number: issue number of the Github issue, in string foramt. + issue_number: issue number of the GitHub issue, in string format. issue_type: issue type to assign Returns: @@ -168,7 +168,7 @@ def change_issue_type(issue_number: int, issue_type: str) -> dict[str, Any]: name="adk_triaging_assistant", description="Triage ADK issues.", instruction=f""" - You are a triaging bot for the Github {REPO} repo with the owner {OWNER}. You will help get issues, and recommend a label. + You are a triaging bot for the GitHub {REPO} repo with the owner {OWNER}. You will help get issues, and recommend a label. 
IMPORTANT: {APPROVAL_INSTRUCTION} Here are the rules for labeling: @@ -177,7 +177,7 @@ def change_issue_type(issue_number: int, issue_type: str) -> dict[str, Any]: - If it's about UI/web, label it with "web" - If the user is asking about a question, label it with "question" - If it's related to tools, label it with "tools" - - If it's about agent evalaution, then label it with "eval". + - If it's about agent evaluation, then label it with "eval". - If it's about streaming/live, label it with "live". - If it's about model support(non-Gemini, like Litellm, Ollama, OpenAI models), label it with "models". - If it's about tracing, label it with "tracing". diff --git a/contributing/samples/application_integration_agent/README.md b/contributing/samples/application_integration_agent/README.md index a7106c09a8..0e0a70c17c 100644 --- a/contributing/samples/application_integration_agent/README.md +++ b/contributing/samples/application_integration_agent/README.md @@ -7,7 +7,7 @@ This sample demonstrates how to use the `ApplicationIntegrationToolset` within a ## Prerequisites 1. **Set up Integration Connection:** - * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create an JIRA connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection. + * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. 
Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create a Jira connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection. * 2. **Configure Environment Variables:** diff --git a/contributing/samples/application_integration_agent/agent.py b/contributing/samples/application_integration_agent/agent.py index 9658641e3c..83e1143600 100644 --- a/contributing/samples/application_integration_agent/agent.py +++ b/contributing/samples/application_integration_agent/agent.py @@ -40,7 +40,7 @@ model="gemini-2.0-flash", name="Issue_Management_Agent", instruction=""" - You are an agent that helps manage issues in a JIRA instance. + You are an agent that helps manage issues in a Jira instance. Be accurate in your responses based on the tool response. You can perform any formatting in the response that is appropriate or if asked by the user. If there is an error in the tool response, understand the error and try and see if you can fix the error and then and execute the tool again. For example if a variable or parameter is missing, try and see if you can find it in the request or user query or default it and then execute the tool again or check for other tools that could give you the details. If there are any math operations like count or max, min in the user request, call the tool to get the data and perform the math operations and then return the result in the response. For example for maximum, fetch the list and then do the math operation. 
diff --git a/contributing/samples/bigquery/agent.py b/contributing/samples/bigquery/agent.py index 8096fde653..56a7367c8d 100644 --- a/contributing/samples/bigquery/agent.py +++ b/contributing/samples/bigquery/agent.py @@ -42,7 +42,7 @@ ) if CREDENTIALS_TYPE == AuthCredentialTypes.OAUTH2: - # Initiaze the tools to do interactive OAuth + # Initialize the tools to do interactive OAuth # The environment variables OAUTH_CLIENT_ID and OAUTH_CLIENT_SECRET # must be set credentials_config = BigQueryCredentialsConfig( diff --git a/contributing/samples/code_execution/agent.py b/contributing/samples/code_execution/agent.py index b8cbd61417..82de04f25d 100644 --- a/contributing/samples/code_execution/agent.py +++ b/contributing/samples/code_execution/agent.py @@ -43,7 +43,7 @@ def base_system_instruction(): ``` **Output Visibility:** Always print the output of code execution to visualize results, especially for data exploration and analysis. For example: - - To look a the shape of a pandas.DataFrame do: + - To look at the shape of a pandas.DataFrame do: ```tool_code print(df.shape) ``` @@ -84,7 +84,7 @@ def base_system_instruction(): You need to assist the user with their queries by looking at the data and the context in the conversation. -You final answer should summarize the code and code execution relavant to the user query. +Your final answer should summarize the code and code execution relevant to the user query. You should include all pieces of data to answer the user query, such as the table from code execution results. If you cannot answer the question directly, you should follow the guidelines above to generate the next step. 
diff --git a/contributing/samples/fields_output_schema/agent.py b/contributing/samples/fields_output_schema/agent.py index e3c6966847..70645ea9ba 100644 --- a/contributing/samples/fields_output_schema/agent.py +++ b/contributing/samples/fields_output_schema/agent.py @@ -16,7 +16,7 @@ from pydantic import BaseModel -class WeahterData(BaseModel): +class WeatherData(BaseModel): temperature: str humidity: str wind_speed: str @@ -43,6 +43,6 @@ class WeahterData(BaseModel): * wind_speed: 13 mph """, - output_schema=WeahterData, + output_schema=WeatherData, output_key='weather_data', ) diff --git a/contributing/samples/google_api/agent.py b/contributing/samples/google_api/agent.py index bb06e36f27..390f1bca10 100644 --- a/contributing/samples/google_api/agent.py +++ b/contributing/samples/google_api/agent.py @@ -46,7 +46,7 @@ Use the provided tools to conduct various operations on users' data in Google BigQuery. Scenario 1: - The user wants to query their biguqery datasets + The user wants to query their bigquery datasets Use bigquery_datasets_list to query user's datasets Scenario 2: diff --git a/contributing/samples/hello_world_ollama/README.md b/contributing/samples/hello_world_ollama/README.md index 559e42f65e..dc7acf139d 100644 --- a/contributing/samples/hello_world_ollama/README.md +++ b/contributing/samples/hello_world_ollama/README.md @@ -25,7 +25,7 @@ ollama show mistral-small3.1 You are supposed to see `tools` listed under capabilities. -You can also look at the template the model is using and tweak it based on your needs. +You can also look at the model's template and tweak it based on your needs. 
```bash ollama show --modelfile llama3.1 > model_file_to_modify diff --git a/contributing/samples/jira_agent/agent.py b/contributing/samples/jira_agent/agent.py index 9f2b866c95..537d8f0845 100644 --- a/contributing/samples/jira_agent/agent.py +++ b/contributing/samples/jira_agent/agent.py @@ -19,7 +19,7 @@ root_agent = Agent( model='gemini-2.0-flash-001', name='jira_connector_agent', - description='This agent helps search issues in JIRA', + description='This agent helps search issues in Jira', instruction=""" To start with, greet the user First, you will be given a description of what you can do. diff --git a/contributing/samples/jira_agent/tools.py b/contributing/samples/jira_agent/tools.py index f03c5ed106..94c37565fa 100644 --- a/contributing/samples/jira_agent/tools.py +++ b/contributing/samples/jira_agent/tools.py @@ -27,7 +27,7 @@ tool_name="jira_conversation_tool", tool_instructions=""" - This tool is to call an integration to search for issues in JIRA + This tool is to call an integration to search for issues in Jira """, ) diff --git a/contributing/samples/langchain_youtube_search_agent/README.md b/contributing/samples/langchain_youtube_search_agent/README.md index e87ca59420..7d46f60b8d 100644 --- a/contributing/samples/langchain_youtube_search_agent/README.md +++ b/contributing/samples/langchain_youtube_search_agent/README.md @@ -1,6 +1,6 @@ # Langchain Youtube Search Agent -This agent utilize the Lanchain YoutubeSearchTool to search youtubes. +This agent utilizes the Langchain YoutubeSearchTool to search youtubes. 
You need to install below dependencies: ```python diff --git a/contributing/samples/live_bidi_streaming_multi_agent/readme.md b/contributing/samples/live_bidi_streaming_multi_agent/readme.md index 27c93b10f9..dee6f38bf0 100644 --- a/contributing/samples/live_bidi_streaming_multi_agent/readme.md +++ b/contributing/samples/live_bidi_streaming_multi_agent/readme.md @@ -1,9 +1,7 @@ # Simplistic Live (Bidi-Streaming) Multi-Agent -This project provides a basic example of a live, bidirectional streaming multi-agent +This project provides a basic example of a live, [bidirectional streaming](https://google.github.io/adk-docs/streaming/) multi-agent designed for testing and experimentation. -You can see full documentation [here](https://google.github.io/adk-docs/streaming/). - ## Getting Started Follow these steps to get the agent up and running: diff --git a/contributing/samples/live_bidi_streaming_single_agent/readme.md b/contributing/samples/live_bidi_streaming_single_agent/readme.md index 6a9258f3ee..56187fb0d4 100644 --- a/contributing/samples/live_bidi_streaming_single_agent/readme.md +++ b/contributing/samples/live_bidi_streaming_single_agent/readme.md @@ -1,9 +1,7 @@ # Simplistic Live (Bidi-Streaming) Agent -This project provides a basic example of a live, bidirectional streaming agent +This project provides a basic example of a live, [bidirectional streaming](https://google.github.io/adk-docs/streaming/) agent designed for testing and experimentation. -You can see full documentation [here](https://google.github.io/adk-docs/streaming/). 
- ## Getting Started Follow these steps to get the agent up and running: diff --git a/contributing/samples/mcp_stdio_notion_agent/README.md b/contributing/samples/mcp_stdio_notion_agent/README.md index f53bd2f03f..d40df313f2 100644 --- a/contributing/samples/mcp_stdio_notion_agent/README.md +++ b/contributing/samples/mcp_stdio_notion_agent/README.md @@ -17,4 +17,4 @@ export NOTION_API_KEY= * Send below queries: * What can you do for me ? - * Seach `XXXX` in my pages. + * Search `XXXX` in my pages. diff --git a/contributing/samples/oauth_calendar_agent/README.md b/contributing/samples/oauth_calendar_agent/README.md index 87e382f6c1..381bb7902b 100644 --- a/contributing/samples/oauth_calendar_agent/README.md +++ b/contributing/samples/oauth_calendar_agent/README.md @@ -13,7 +13,7 @@ This sample tests and demos the OAuth support in ADK via two tools: * 2. get_calendar_events - This is an google calendar tool that calls Google Calendar API to get the + This is a google calendar tool that calls Google Calendar API to get the details of a specific calendar. This tool is from the ADK built-in Google Calendar ToolSet. Everything is wrapped and the tool user just needs to pass in the client id and client secret. diff --git a/contributing/samples/oauth_calendar_agent/agent.py b/contributing/samples/oauth_calendar_agent/agent.py index 0be3b6e593..a33ea2b942 100644 --- a/contributing/samples/oauth_calendar_agent/agent.py +++ b/contributing/samples/oauth_calendar_agent/agent.py @@ -131,7 +131,7 @@ def update_time(callback_context: CallbackContext): name="calendar_agent", instruction=""" You are a helpful personal calendar assistant. - Use the provided tools to search for calendar events (use 10 as limit if user does't specify), and update them. + Use the provided tools to search for calendar events (use 10 as limit if user doesn't specify), and update them. Use "primary" as the calendarId if users don't specify. 
Scenario1: @@ -159,7 +159,7 @@ def update_time(callback_context: CallbackContext): {userInfo?} - Currnet time: {_time} + Current time: {_time} """, tools=[ AuthenticatedFunctionTool( diff --git a/contributing/samples/session_state_agent/README.md b/contributing/samples/session_state_agent/README.md index bec0536487..699517ec53 100644 --- a/contributing/samples/session_state_agent/README.md +++ b/contributing/samples/session_state_agent/README.md @@ -6,7 +6,7 @@ After assigning a state using the context object (e.g. `tool_context.state['log_query_var'] = 'log_query_var_value'`): * The state is available for use in a later callback. -* Once the resulting event is processed by the runner and appneded in the +* Once the resulting event is processed by the runner and appended in the session, the state will be also persisted in the session. This sample agent is for demonstrating the aforementioned behavior. @@ -55,7 +55,7 @@ state is available after writing via the context object ### Current Behavior -The current behavior of pesisting states are: +The current behavior of persisting states are: * for `before_agent_callback`: state delta will be persisted after all callbacks are processed. * for `before_model_callback`: state delta will be persisted with the final LlmResponse, diff --git a/llms-full.txt b/llms-full.txt index 35ffd56dab..4c744512e4 100644 --- a/llms-full.txt +++ b/llms-full.txt @@ -7010,7 +7010,7 @@ This approach involves creating individual test files, each representing a singl - `Expected Intermediate Agent Responses`: These are the natural language responses that the agent (or sub-agents) generates as it moves towards generating a final answer. These natural language responses are usually an - artifact of an multi-agent system, where your root agent depends on sub-agents to achieve a goal. These intermediate responses, may or may not be of + artifact of a multi-agent system, where your root agent depends on sub-agents to achieve a goal. 
These intermediate responses, may or may not be of interest to the end user, but for a developer/owner of the system, are of critical importance, as they give you the confidence that the agent went through the right path to generate final response. @@ -9102,7 +9102,7 @@ Let's break down what's happening: replace `u_123` with a specific user ID, and `s_123` with a specific session ID. * `{"state": {"key1": "value1", "key2": 42}}`: This is optional. You can use - this to customize the agent's pre-existing state (dict) when creating the + this to customize the agent's preexisting state (dict) when creating the session. This should return the session information if it was created successfully. The @@ -13315,7 +13315,7 @@ You set up authentication when defining your tool: ## Journey 1: Building Agentic Applications with Authenticated Tools -This section focuses on using pre-existing tools (like those from `RestApiTool/ OpenAPIToolset`, `APIHubToolset`, `GoogleApiToolSet`) that require authentication within your agentic application. Your main responsibility is configuring the tools and handling the client-side part of interactive authentication flows (if required by the tool). +This section focuses on using preexisting tools (like those from `RestApiTool/ OpenAPIToolset`, `APIHubToolset`, `GoogleApiToolSet`) that require authentication within your agentic application. Your main responsibility is configuring the tools and handling the client-side part of interactive authentication flows (if required by the tool). ### 1. Configuring Tools with Authentication diff --git a/pylintrc b/pylintrc index 3fc2263683..303cbc3027 100644 --- a/pylintrc +++ b/pylintrc @@ -257,7 +257,7 @@ single-line-if-stmt=yes max-module-lines=99999 # String used as indentation unit. The internal Google style guide mandates 2 -# spaces. Google's externaly-published style guide says 4, consistent with +# spaces. Google's externally-published style guide says 4, consistent with # PEP 8. 
Here, we use 2 spaces, for conformity with many open-sourced Google # projects (like TensorFlow). indent-string=' ' diff --git a/src/google/adk/a2a/converters/part_converter.py b/src/google/adk/a2a/converters/part_converter.py index d796cb5ff1..3718b3ac13 100644 --- a/src/google/adk/a2a/converters/part_converter.py +++ b/src/google/adk/a2a/converters/part_converter.py @@ -13,7 +13,7 @@ # limitations under the License. """ -module containing utilities for conversion betwen A2A Part and Google GenAI Part +module containing utilities for conversion between A2A Part and Google GenAI Part """ from __future__ import annotations @@ -191,7 +191,7 @@ def convert_genai_part_to_a2a_part( # Convert the funcall and function response to A2A DataPart. # This is mainly for converting human in the loop and auth request and # response. - # TODO once A2A defined how to suervice such information, migrate below + # TODO once A2A defined how to service such information, migrate below # logic accordingly if part.function_call: return a2a_types.Part( diff --git a/src/google/adk/a2a/utils/agent_card_builder.py b/src/google/adk/a2a/utils/agent_card_builder.py index bde5620168..aa7f657f99 100644 --- a/src/google/adk/a2a/utils/agent_card_builder.py +++ b/src/google/adk/a2a/utils/agent_card_builder.py @@ -473,7 +473,7 @@ def _get_default_description(agent: BaseAgent) -> str: async def _extract_examples_from_agent( agent: BaseAgent, ) -> Optional[List[Dict]]: - """Extract examples from example_tool if configured, otherwise from agent instruction.""" + """Extract examples from example_tool if configured; otherwise, from agent instruction.""" if not isinstance(agent, LlmAgent): return None diff --git a/src/google/adk/agents/base_agent.py b/src/google/adk/agents/base_agent.py index a1d633bc06..09eef908e0 100644 --- a/src/google/adk/agents/base_agent.py +++ b/src/google/adk/agents/base_agent.py @@ -232,7 +232,7 @@ def clone( invalid_fields = set(update) - allowed_fields if invalid_fields: raise 
ValueError( - f'Cannot update non-existent fields in {self.__class__.__name__}:' + f'Cannot update nonexistent fields in {self.__class__.__name__}:' f' {invalid_fields}' ) @@ -588,7 +588,7 @@ def from_config( """Creates an agent from a config. If sub-classes uses a custom agent config, override `_from_config_kwargs` - method to return an updated kwargs for agent construstor. + method to return an updated kwargs for agent constructor. Args: config: The config to create the agent from. diff --git a/src/google/adk/agents/common_configs.py b/src/google/adk/agents/common_configs.py index b765fcb30c..f1f9c57f74 100644 --- a/src/google/adk/agents/common_configs.py +++ b/src/google/adk/agents/common_configs.py @@ -65,7 +65,7 @@ class CodeConfig(BaseModel): args: Optional[List[ArgumentConfig]] = None """Optional. The arguments for the code when `name` refers to a function or a - class's contructor. + class's constructor. Examples: ``` diff --git a/src/google/adk/agents/config_schemas/AgentConfig.json b/src/google/adk/agents/config_schemas/AgentConfig.json index 9662a118ab..6fa079cea1 100644 --- a/src/google/adk/agents/config_schemas/AgentConfig.json +++ b/src/google/adk/agents/config_schemas/AgentConfig.json @@ -629,7 +629,7 @@ } ], "default": null, - "description": "Optional. The producer of the content. Must be either 'user' or\n 'model'. Useful to set for multi-turn conversations, otherwise can be\n empty. If role is not specified, SDK will determine the role.", + "description": "Optional. The producer of the content. Must be either 'user' or\n 'model'. Useful to set for multi-turn conversations; otherwise, can be\n empty. If role is not specified, SDK will determine the role.", "title": "Role" } }, @@ -1095,7 +1095,7 @@ } ], "default": null, - "description": "Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. It is not currently used in the Gemini GenerateContent calls.", + "description": "Optional. 
Display name of the file data. Used to provide a label or filename to distinguish file data. It is not currently used in the Gemini GenerateContent calls.", "title": "Displayname" }, "fileUri": { @@ -1347,7 +1347,7 @@ } ], "default": null, - "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" + "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case-sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" }, "parametersJsonSchema": { "anyOf": [ @@ -4083,7 +4083,7 @@ } ], "default": null, - "description": "Optional. Number of search results to return per query. The default value is 10. The maximumm allowed value is 10.", + "description": "Optional. Number of search results to return per query. The default value is 10. 
The maximum allowed value is 10.", "title": "Maxresults" } }, @@ -4501,7 +4501,7 @@ }, "mcp__types__Tool": { "additionalProperties": true, - "description": "Definition for a tool the client can call.", + "description": "Definition for a tool that the client can call.", "properties": { "name": { "title": "Name", diff --git a/src/google/adk/agents/remote_a2a_agent.py b/src/google/adk/agents/remote_a2a_agent.py index d3aea5d3ee..af070bb8c6 100644 --- a/src/google/adk/agents/remote_a2a_agent.py +++ b/src/google/adk/agents/remote_a2a_agent.py @@ -570,7 +570,7 @@ async def _run_live_impl( raise NotImplementedError( f"_run_live_impl for {type(self)} via A2A is not implemented." ) - # This makes the function an async generator but the yield is still unreachable + # This makes the function into an async generator but the yield is still unreachable yield async def cleanup(self) -> None: diff --git a/src/google/adk/auth/auth_handler.py b/src/google/adk/auth/auth_handler.py index 7a51a71e29..07515ab2e8 100644 --- a/src/google/adk/auth/auth_handler.py +++ b/src/google/adk/auth/auth_handler.py @@ -137,7 +137,7 @@ def generate_auth_request(self) -> AuthConfig: def generate_auth_uri( self, ) -> AuthCredential: - """Generates an response containing the auth uri for user to sign in. + """Generates a response containing the auth uri for user to sign in. Returns: An AuthCredential object containing the auth URI and state. 
diff --git a/src/google/adk/auth/auth_preprocessor.py b/src/google/adk/auth/auth_preprocessor.py index 133b456b72..c3d9b71c2b 100644 --- a/src/google/adk/auth/auth_preprocessor.py +++ b/src/google/adk/auth/auth_preprocessor.py @@ -100,7 +100,7 @@ async def run_async( if not tools_to_resume: continue - # found the the system long running request euc function call + # found the system long running request euc function call # looking for original function call that requests euc for j in range(i - 1, -1, -1): event = events[j] diff --git a/src/google/adk/auth/exchanger/base_credential_exchanger.py b/src/google/adk/auth/exchanger/base_credential_exchanger.py index b09adb80a8..31106b55e2 100644 --- a/src/google/adk/auth/exchanger/base_credential_exchanger.py +++ b/src/google/adk/auth/exchanger/base_credential_exchanger.py @@ -24,7 +24,7 @@ from ..auth_schemes import AuthScheme -class CredentialExchangError(Exception): +class CredentialExchangeError(Exception): """Base exception for credential exchange errors.""" @@ -52,6 +52,6 @@ async def exchange( The exchanged credential. Raises: - CredentialExchangError: If credential exchange fails. + CredentialExchangeError: If credential exchange fails. """ pass diff --git a/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py b/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py index 71c5f7c896..c0c3e8aea0 100644 --- a/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py +++ b/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py @@ -30,7 +30,7 @@ from typing_extensions import override from .base_credential_exchanger import BaseCredentialExchanger -from .base_credential_exchanger import CredentialExchangError +from .base_credential_exchanger import CredentialExchangeError try: from authlib.integrations.requests_client import OAuth2Session @@ -63,10 +63,10 @@ async def exchange( The exchanged credential with access token. Raises: - CredentialExchangError: If auth_scheme is missing. 
+ CredentialExchangeError: If auth_scheme is missing. """ if not auth_scheme: - raise CredentialExchangError( + raise CredentialExchangeError( "auth_scheme is required for OAuth2 credential exchange" ) diff --git a/src/google/adk/cli/adk_web_server.py b/src/google/adk/cli/adk_web_server.py index f0cae1ae88..1b422fe335 100644 --- a/src/google/adk/cli/adk_web_server.py +++ b/src/google/adk/cli/adk_web_server.py @@ -396,7 +396,7 @@ class AdkWebServer: If you pass in a web_assets_dir, the static assets will be served under /dev-ui in addition to the API endpoints created by default. - You can add add additional API endpoints by modifying the FastAPI app + You can add additional API endpoints by modifying the FastAPI app instance returned by get_fast_api_app as this class exposes the agent runners and most other bits of state retained during the lifetime of the server. @@ -449,7 +449,7 @@ def __init__( self.extra_plugins = extra_plugins or [] self.logo_text = logo_text self.logo_image_url = logo_image_url - # Internal propeties we want to allow being modified from callbacks. + # Internal properties we want to allow being modified from callbacks. self.runners_to_clean: set[str] = set() self.current_app_name_ref: SharedValue[str] = SharedValue(value="") self.runner_dict = {} diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py index 40c15fe8eb..06f567a037 100644 --- a/src/google/adk/cli/cli_tools_click.py +++ b/src/google/adk/cli/cli_tools_click.py @@ -518,7 +518,7 @@ def cli_eval( This will only run eval_1, eval_2 and eval_3 from sample_eval_set_file.json. - *Eval Set Id* + *Eval Set ID* For each eval set, all evals will be run by default. 
If you want to run only specific evals from a eval set, first create a comma @@ -911,7 +911,7 @@ def wrapper(*args, **kwargs): def deprecated_adk_services_options(): - """Depracated ADK services options.""" + """Deprecated ADK services options.""" def warn(alternative_param, ctx, param, value): if value: diff --git a/src/google/adk/cli/utils/agent_loader.py b/src/google/adk/cli/utils/agent_loader.py index 6eee2ee839..0755c9147c 100644 --- a/src/google/adk/cli/utils/agent_loader.py +++ b/src/google/adk/cli/utils/agent_loader.py @@ -95,7 +95,7 @@ def _load_from_module_or_package( if e.name == agent_name: logger.debug("Module %s itself not found.", agent_name) else: - # it's the case the module imported by {agent_name}.agent module is not + # the module imported by {agent_name}.agent module is not # found e.msg = f"Fail to load '{agent_name}' module. " + e.msg raise e @@ -142,8 +142,7 @@ def _load_from_submodule( if e.name == f"{agent_name}.agent" or e.name == agent_name: logger.debug("Module %s.agent not found.", agent_name) else: - # it's the case the module imported by {agent_name}.agent module is not - # found + # the module imported by {agent_name}.agent module is not found e.msg = f"Fail to load '{agent_name}.agent' module. " + e.msg raise e except Exception as e: diff --git a/src/google/adk/code_executors/code_execution_utils.py b/src/google/adk/code_executors/code_execution_utils.py index 46b412a0de..86aa085acf 100644 --- a/src/google/adk/code_executors/code_execution_utils.py +++ b/src/google/adk/code_executors/code_execution_utils.py @@ -122,12 +122,12 @@ def extract_code_and_truncate_content( the code blocks. Returns: - The first code block if found, otherwise None. + The first code block if found; otherwise, None. """ if not content or not content.parts: return - # Extract the code from the executable code parts if there're no associated + # Extract the code from the executable code parts if there are no associated # code execution result parts. 
for idx, part in enumerate(content.parts): if part.executable_code and ( diff --git a/src/google/adk/evaluation/_eval_sets_manager_utils.py b/src/google/adk/evaluation/_eval_sets_manager_utils.py index b7e12dd37e..737f769e73 100644 --- a/src/google/adk/evaluation/_eval_sets_manager_utils.py +++ b/src/google/adk/evaluation/_eval_sets_manager_utils.py @@ -28,7 +28,7 @@ def get_eval_set_from_app_and_id( eval_sets_manager: EvalSetsManager, app_name: str, eval_set_id: str ) -> EvalSet: - """Returns an EvalSet if found, otherwise raises NotFoundError.""" + """Returns an EvalSet if found; otherwise, raises NotFoundError.""" eval_set = eval_sets_manager.get_eval_set(app_name, eval_set_id) if not eval_set: raise NotFoundError(f"Eval set `{eval_set_id}` not found.") @@ -38,7 +38,7 @@ def get_eval_set_from_app_and_id( def get_eval_case_from_eval_set( eval_set: EvalSet, eval_case_id: str ) -> Optional[EvalCase]: - """Returns an EvalCase if found, otherwise None.""" + """Returns an EvalCase if found; otherwise, None.""" eval_case_to_find = None # Look up the eval case by eval_case_id diff --git a/src/google/adk/evaluation/agent_evaluator.py b/src/google/adk/evaluation/agent_evaluator.py index c97fe16363..e8f552879d 100644 --- a/src/google/adk/evaluation/agent_evaluator.py +++ b/src/google/adk/evaluation/agent_evaluator.py @@ -49,7 +49,7 @@ from .eval_sets_manager import EvalSetsManager from .evaluator import EvalStatus from .in_memory_eval_sets_manager import InMemoryEvalSetsManager -from .local_eval_sets_manager import convert_eval_set_to_pydanctic_schema +from .local_eval_sets_manager import convert_eval_set_to_pydantic_schema from .user_simulator_provider import UserSimulatorProvider logger = logging.getLogger("google_adk." + __name__) @@ -121,7 +121,7 @@ async def evaluate_eval_set( the agent. There is convention in place here, where the code is going to look for 'root_agent' or `get_agent_async` in the loaded module. eval_set: The eval set. 
- criteria: Evauation criterias, a dictionary of metric names to their + criteria: Evaluation criteria, a dictionary of metric names to their respective thresholds. This field is deprecated. eval_config: The evauation config. num_runs: Number of times all entries in the eval dataset should be @@ -283,7 +283,7 @@ def _load_eval_set_from_file( try: eval_set = EvalSet.model_validate_json(content) assert len(initial_session) == 0, ( - "Intial session should be specified as a part of EvalSet file." + "Initial session should be specified as a part of EvalSet file." " Explicit initial session is only needed, when specifying data in" " the older schema." ) @@ -315,7 +315,7 @@ def _get_eval_set_from_old_format( "data": data, "initial_session": initial_session, } - return convert_eval_set_to_pydanctic_schema( + return convert_eval_set_to_pydantic_schema( eval_set_id=str(uuid.uuid4()), eval_set_in_json_format=[eval_data] ) diff --git a/src/google/adk/evaluation/base_eval_service.py b/src/google/adk/evaluation/base_eval_service.py index 3d576ab2d6..a82a468324 100644 --- a/src/google/adk/evaluation/base_eval_service.py +++ b/src/google/adk/evaluation/base_eval_service.py @@ -94,11 +94,11 @@ class InferenceRequest(BaseModel): description="""The name of the app to which the eval case belongs to.""" ) - eval_set_id: str = Field(description="""Id of the eval set.""") + eval_set_id: str = Field(description="""ID of the eval set.""") eval_case_ids: Optional[list[str]] = Field( default=None, - description="""Id of the eval cases for which inferences need to be + description="""ID of the eval cases for which inferences need to be generated. All the eval case ids should belong to the EvalSet. 
@@ -133,10 +133,10 @@ class InferenceResult(BaseModel): description="""The name of the app to which the eval case belongs to.""" ) - eval_set_id: str = Field(description="""Id of the eval set.""") + eval_set_id: str = Field(description="""ID of the eval set.""") eval_case_id: str = Field( - description="""Id of the eval case for which inferences were generated.""", + description="""ID of the eval case for which inferences were generated.""", ) inferences: Optional[list[Invocation]] = Field( @@ -145,7 +145,7 @@ class InferenceResult(BaseModel): ) session_id: Optional[str] = Field( - description="""Id of the inference session.""" + description="""ID of the inference session.""" ) status: InferenceStatus = Field( diff --git a/src/google/adk/evaluation/eval_metrics.py b/src/google/adk/evaluation/eval_metrics.py index 84a19879ae..321e2cc1d9 100644 --- a/src/google/adk/evaluation/eval_metrics.py +++ b/src/google/adk/evaluation/eval_metrics.py @@ -225,7 +225,7 @@ class EvalMetricResultPerInvocation(EvalBaseModel): eval_metric_results: list[EvalMetricResult] = Field( default=[], - description="Eval resutls for each applicable metric.", + description="Eval results for each applicable metric.", ) diff --git a/src/google/adk/evaluation/eval_sets_manager.py b/src/google/adk/evaluation/eval_sets_manager.py index 445cafd82f..be4655e46b 100644 --- a/src/google/adk/evaluation/eval_sets_manager.py +++ b/src/google/adk/evaluation/eval_sets_manager.py @@ -54,7 +54,7 @@ def list_eval_sets(self, app_name: str) -> list[str]: def get_eval_case( self, app_name: str, eval_set_id: str, eval_case_id: str ) -> Optional[EvalCase]: - """Returns an EvalCase if found, otherwise None.""" + """Returns an EvalCase if found; otherwise, None.""" @abstractmethod def add_eval_case(self, app_name: str, eval_set_id: str, eval_case: EvalCase): diff --git a/src/google/adk/evaluation/evaluation_generator.py b/src/google/adk/evaluation/evaluation_generator.py index dff61810b4..970ebd8f9a 100644 --- 
a/src/google/adk/evaluation/evaluation_generator.py +++ b/src/google/adk/evaluation/evaluation_generator.py @@ -55,7 +55,7 @@ class EvalCaseResponses(BaseModel): """Contains multiple responses associated with an EvalCase. - Multiple responses are a result of repeated requests to genereate inferences. + Multiple responses are a result of repeated requests to generate inferences. """ eval_case: EvalCase diff --git a/src/google/adk/evaluation/evaluator.py b/src/google/adk/evaluation/evaluator.py index 2c007fe4a1..7a97855bec 100644 --- a/src/google/adk/evaluation/evaluator.py +++ b/src/google/adk/evaluation/evaluator.py @@ -54,7 +54,7 @@ class EvaluationResult(BaseModel): class Evaluator(ABC): - """A merics evaluator interface.""" + """A metrics evaluator interface.""" criterion_type: ClassVar[type[BaseCriterion]] = BaseCriterion diff --git a/src/google/adk/evaluation/gcs_eval_sets_manager.py b/src/google/adk/evaluation/gcs_eval_sets_manager.py index 86039c1748..cc8a572697 100644 --- a/src/google/adk/evaluation/gcs_eval_sets_manager.py +++ b/src/google/adk/evaluation/gcs_eval_sets_manager.py @@ -108,9 +108,9 @@ def create_eval_set(self, app_name: str, eval_set_id: str) -> EvalSet: """Creates an empty EvalSet and saves it to GCS. Raises: - ValueError: If eval set id is not valid or an eval set already exists. + ValueError: If Eval Set ID is not valid or an eval set already exists. 
""" - self._validate_id(id_name="Eval Set Id", id_value=eval_set_id) + self._validate_id(id_name="Eval Set ID", id_value=eval_set_id) new_eval_set_blob_name = self._get_eval_set_blob_name(app_name, eval_set_id) if self.bucket.blob(new_eval_set_blob_name).exists(): raise ValueError( diff --git a/src/google/adk/evaluation/local_eval_service.py b/src/google/adk/evaluation/local_eval_service.py index 09a08d37bb..000964d9a7 100644 --- a/src/google/adk/evaluation/local_eval_service.py +++ b/src/google/adk/evaluation/local_eval_service.py @@ -209,7 +209,7 @@ async def _evaluate_single_inference_result( # We also keep track of the overall score for a metric, derived from all # invocation. For example, if we were keeping track the metric that compares - # how well is the final resposne as compared to a golden answer, then each + # how well is the final response as compared to a golden answer, then each # invocation will have the value of this metric. We will also have an # overall score using aggregation strategy across all invocations. This # would be the score for the eval case. @@ -267,7 +267,7 @@ async def _evaluate_single_inference_result( overall_eval_status=EvalStatus.NOT_EVALUATED ) - # Track overall scrore across all invocations. + # Track overall score across all invocations. eval_metric_result_details = EvalMetricResultDetails( rubric_scores=evaluation_result.overall_rubric_scores ) @@ -366,8 +366,8 @@ def _generate_final_eval_status( self, overall_eval_metric_results: list[EvalMetricResult] ) -> EvalStatus: final_eval_status = EvalStatus.NOT_EVALUATED - # Go over the all the eval statuses and mark the final eval status as - # passed if all of them pass, otherwise mark the final eval status to + # Go over all the eval statuses and mark the final eval status as + # passed if all of them pass; otherwise, mark the final eval status to # failed. 
for overall_eval_metric_result in overall_eval_metric_results: overall_eval_status = overall_eval_metric_result.eval_status diff --git a/src/google/adk/evaluation/local_eval_sets_manager.py b/src/google/adk/evaluation/local_eval_sets_manager.py index 841dab1fe7..da5225efe6 100644 --- a/src/google/adk/evaluation/local_eval_sets_manager.py +++ b/src/google/adk/evaluation/local_eval_sets_manager.py @@ -85,11 +85,11 @@ def _convert_invocation_to_pydantic_schema( ) -def convert_eval_set_to_pydanctic_schema( +def convert_eval_set_to_pydantic_schema( eval_set_id: str, eval_set_in_json_format: list[dict[str, Any]], ) -> EvalSet: - r"""Returns an pydantic EvalSet generated from the json representation. + r"""Returns a pydantic EvalSet generated from the json representation. Args: eval_set_id: Eval set id. @@ -183,7 +183,7 @@ def load_eval_set_from_file( except ValidationError: # We assume that the eval data was specified in the old format and try # to convert it to the new format. - return convert_eval_set_to_pydanctic_schema( + return convert_eval_set_to_pydantic_schema( eval_set_id, json.loads(content) ) @@ -209,9 +209,9 @@ def create_eval_set(self, app_name: str, eval_set_id: str) -> EvalSet: """Creates and returns an empty EvalSet given the app_name and eval_set_id. Raises: - ValueError: If eval set id is not valid or an eval set already exists. + ValueError: If Eval Set ID is not valid or an eval set already exists. 
""" - self._validate_id(id_name="Eval Set Id", id_value=eval_set_id) + self._validate_id(id_name="Eval Set ID", id_value=eval_set_id) # Define the file path new_eval_set_path = self._get_eval_set_file_path(app_name, eval_set_id) @@ -265,7 +265,7 @@ def list_eval_sets(self, app_name: str) -> list[str]: def get_eval_case( self, app_name: str, eval_set_id: str, eval_case_id: str ) -> Optional[EvalCase]: - """Returns an EvalCase if found, otherwise None.""" + """Returns an EvalCase if found; otherwise, None.""" eval_set = self.get_eval_set(app_name, eval_set_id) if not eval_set: return None diff --git a/src/google/adk/evaluation/response_evaluator.py b/src/google/adk/evaluation/response_evaluator.py index f76a717fa6..d51c3829fc 100644 --- a/src/google/adk/evaluation/response_evaluator.py +++ b/src/google/adk/evaluation/response_evaluator.py @@ -38,7 +38,7 @@ class ResponseEvaluator(Evaluator): This class supports two metrics: 1) response_evaluation_score - This metric evaluates how coherent agent's resposne was. + This metric evaluates how coherent agent's response was. Value range of this metric is [1,5], with values closer to 5 more desirable. @@ -83,7 +83,7 @@ def get_metric_info(metric_name: str) -> MetricInfo: return MetricInfo( metric_name=PrebuiltMetrics.RESPONSE_EVALUATION_SCORE.value, description=( - "This metric evaluates how coherent agent's resposne was. Value" + "This metric evaluates how coherent agent's response was. Value" " range of this metric is [1,5], with values closer to 5 more" " desirable." ), diff --git a/src/google/adk/events/event.py b/src/google/adk/events/event.py index 8114d31823..cca086430b 100644 --- a/src/google/adk/events/event.py +++ b/src/google/adk/events/event.py @@ -84,7 +84,7 @@ def is_final_response(self) -> bool: NOTE: This method is ONLY for use by Agent Development Kit. 
- Note that when multiple agents participage in one invocation, there could be + Note that when multiple agents participate in one invocation, there could be one event has `is_final_response()` as True for each participating agent. """ if self.actions.skip_summarization or self.long_running_tool_ids: diff --git a/src/google/adk/flows/llm_flows/_code_execution.py b/src/google/adk/flows/llm_flows/_code_execution.py index a7cc9c14a3..bfa84db69d 100644 --- a/src/google/adk/flows/llm_flows/_code_execution.py +++ b/src/google/adk/flows/llm_flows/_code_execution.py @@ -205,7 +205,7 @@ async def _run_pre_processor( # [Step 1] Extract data files from the session_history and store them in # memory. Meanwhile, mutate the inline data file to text part in session # history from all turns. - all_input_files = _extrac_and_replace_inline_files( + all_input_files = _extract_and_replace_inline_files( code_executor_context, llm_request ) @@ -372,7 +372,7 @@ async def _run_post_processor( llm_response.content = None -def _extrac_and_replace_inline_files( +def _extract_and_replace_inline_files( code_executor_context: CodeExecutorContext, llm_request: LlmRequest, ) -> list[File]: @@ -403,7 +403,7 @@ def _extrac_and_replace_inline_files( text='\nAvailable file: `%s`\n' % file_name ) - # Add the inlne data as input file to the code executor context. + # Add the inline data as input file to the code executor context. 
file = File( name=file_name, content=CodeExecutionUtils.get_encoded_file_content( diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index 644dc55b6c..f6d64cae5f 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -251,7 +251,7 @@ async def _send_to_model( invocation_context.transcription_cache = [] if not invocation_context.run_config.input_audio_transcription: # if the live model's input transcription is not enabled, then - # we use our onwn audio transcriber to achieve that. + # we use our own audio transcriber to achieve that. invocation_context.transcription_cache.append( TranscriptionEntry(role='user', data=live_request.blob) ) @@ -300,7 +300,7 @@ def get_author_for_event(llm_response): async for llm_response in agen: if llm_response.live_session_resumption_update: logger.info( - 'Update session resumption hanlde:' + 'Update session resumption handle:' f' {llm_response.live_session_resumption_update}.' ) invocation_context.live_session_resumption_handle = ( diff --git a/src/google/adk/models/base_llm_connection.py b/src/google/adk/models/base_llm_connection.py index 22ca3b360d..afce550b13 100644 --- a/src/google/adk/models/base_llm_connection.py +++ b/src/google/adk/models/base_llm_connection.py @@ -30,7 +30,7 @@ async def send_history(self, history: list[types.Content]): """Sends the conversation history to the model. You call this method right after setting up the model connection. - The model will respond if the last content is from user, otherwise it will + The model will respond if the last content is from user; otherwise, it will wait for new user input before responding. 
Args: diff --git a/src/google/adk/models/gemini_llm_connection.py b/src/google/adk/models/gemini_llm_connection.py index f1470c0a29..fad21c738b 100644 --- a/src/google/adk/models/gemini_llm_connection.py +++ b/src/google/adk/models/gemini_llm_connection.py @@ -40,7 +40,7 @@ async def send_history(self, history: list[types.Content]): """Sends the conversation history to the gemini model. You call this method right after setting up the model connection. - The model will respond if the last content is from user, otherwise it will + The model will respond if the last content is from user; otherwise, it will wait for new user input before responding. Args: diff --git a/src/google/adk/plugins/plugin_manager.py b/src/google/adk/plugins/plugin_manager.py index 217dbb8be6..634f904385 100644 --- a/src/google/adk/plugins/plugin_manager.py +++ b/src/google/adk/plugins/plugin_manager.py @@ -102,7 +102,7 @@ def get_plugin(self, plugin_name: str) -> Optional[BasePlugin]: plugin_name: The name of the plugin to retrieve. Returns: - The plugin instance if found, otherwise `None`. + The plugin instance if found; otherwise, `None`. """ return next((p for p in self.plugins if p.name == plugin_name), None) diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py index 776de6f3cc..f016272b32 100644 --- a/src/google/adk/runners.py +++ b/src/google/adk/runners.py @@ -878,7 +878,7 @@ def _find_agent_to_run( message) """ # If the last event is a function response, should send this response to - # the agent that returned the corressponding function call regardless the + # the agent that returned the corresponding function call regardless of the # type of the agent. e.g. a remote a2a agent may surface a credential # request as a special long running function tool call.
event = find_matching_function_call(session.events) diff --git a/src/google/adk/telemetry/tracing.py b/src/google/adk/telemetry/tracing.py index e45c24aee6..1c5cd79269 100644 --- a/src/google/adk/telemetry/tracing.py +++ b/src/google/adk/telemetry/tracing.py @@ -362,7 +362,7 @@ def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]: Returns: A dictionary representation of the LLM request. """ - # Some fields in LlmRequest are function pointers and can not be serialized. + # Some fields in LlmRequest are function pointers and cannot be serialized. result = { 'model': llm_request.model, 'config': llm_request.config.model_dump( diff --git a/src/google/adk/tools/_google_credentials.py b/src/google/adk/tools/_google_credentials.py index 427a4cc251..c61f942f5c 100644 --- a/src/google/adk/tools/_google_credentials.py +++ b/src/google/adk/tools/_google_credentials.py @@ -70,7 +70,7 @@ class BaseGoogleCredentialsConfig(BaseModel): `google.auth.load_credentials_from_file(...)`. See more details in https://cloud.google.com/iam/docs/service-account-creds#user-managed-keys. - When the deployed environment cannot provide a pre-existing credential, + When the deployed environment cannot provide a preexisting credential, consider setting below client_id, client_secret and scope for end users to go through oauth flow, so that agent can access the user data. """ diff --git a/src/google/adk/tools/apihub_tool/apihub_toolset.py b/src/google/adk/tools/apihub_tool/apihub_toolset.py index ba4d3f4887..fe9e38bd96 100644 --- a/src/google/adk/tools/apihub_tool/apihub_toolset.py +++ b/src/google/adk/tools/apihub_tool/apihub_toolset.py @@ -114,7 +114,7 @@ def __init__( apihub_resource_name: The resource name of the API in API Hub. Example: ``projects/test-project/locations/us-central1/apis/test-api``. access_token: Google Access token. Generate with gcloud cli - ``gcloud auth auth print-access-token``. Used for fetching API Specs from API Hub. 
+ ``gcloud auth print-access-token``. Used for fetching API Specs from API Hub. service_account_json: The service account config as a json string. Required if not using default service credential. It is used for creating the API Hub client and fetching the API Specs from API Hub. diff --git a/src/google/adk/tools/apihub_tool/clients/apihub_client.py b/src/google/adk/tools/apihub_tool/clients/apihub_client.py index 9bee236e33..84bde60297 100644 --- a/src/google/adk/tools/apihub_tool/clients/apihub_client.py +++ b/src/google/adk/tools/apihub_tool/clients/apihub_client.py @@ -37,7 +37,7 @@ class BaseAPIHubClient(ABC): @abstractmethod def get_spec_content(self, resource_name: str) -> str: - """From a given resource name, get the soec in the API Hub.""" + """From a given resource name, get the spec in the API Hub.""" raise NotImplementedError() diff --git a/src/google/adk/tools/apihub_tool/clients/secret_client.py b/src/google/adk/tools/apihub_tool/clients/secret_client.py index d5015b8aa7..f4d1486155 100644 --- a/src/google/adk/tools/apihub_tool/clients/secret_client.py +++ b/src/google/adk/tools/apihub_tool/clients/secret_client.py @@ -29,7 +29,7 @@ class SecretManagerClient: This class provides a simplified interface for retrieving secrets from Secret Manager, handling authentication using either a service account - JSON keyfile (passed as a string) or a pre-existing authorization token. + JSON keyfile (passed as a string) or a preexisting authorization token. Attributes: _credentials: Google Cloud credentials object (ServiceAccountCredentials diff --git a/src/google/adk/tools/bigquery/config.py b/src/google/adk/tools/bigquery/config.py index adaa8234dc..f8e3089d5b 100644 --- a/src/google/adk/tools/bigquery/config.py +++ b/src/google/adk/tools/bigquery/config.py @@ -37,7 +37,7 @@ class WriteMode(Enum): """Only protected write operations are allowed in a BigQuery session. In this mode write operations in the anonymous dataset of a BigQuery session - are allowed. 
For example, a temporaray table can be created, manipulated and + are allowed. For example, a temporary table can be created, manipulated and deleted in the anonymous dataset during Agent interaction, while protecting permanent tables from being modified or deleted. To learn more about BigQuery sessions, see https://cloud.google.com/bigquery/docs/sessions-intro. diff --git a/src/google/adk/tools/bigquery/data_insights_tool.py b/src/google/adk/tools/bigquery/data_insights_tool.py index 8d5a979170..0d7280c236 100644 --- a/src/google/adk/tools/bigquery/data_insights_tool.py +++ b/src/google/adk/tools/bigquery/data_insights_tool.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json from typing import Any diff --git a/src/google/adk/tools/bigquery/query_tool.py b/src/google/adk/tools/bigquery/query_tool.py index fdabd69476..a671beadda 100644 --- a/src/google/adk/tools/bigquery/query_tool.py +++ b/src/google/adk/tools/bigquery/query_tool.py @@ -158,7 +158,7 @@ def execute_sql( elif settings.write_mode == WriteMode.PROTECTED: # In protected write mode, write operation only to a temporary artifact is # allowed. This artifact must have been created in a BigQuery session. In - # such a scenario the session info (session id and the anonymous dataset + # such a scenario, the session info (session id and the anonymous dataset # containing the artifact) is persisted in the tool context. 
bq_session_info = tool_context.state.get(BIGQUERY_SESSION_INFO_KEY, None) if bq_session_info: diff --git a/src/google/adk/tools/enterprise_search_tool.py b/src/google/adk/tools/enterprise_search_tool.py index f27b7de67f..7980f8f028 100644 --- a/src/google/adk/tools/enterprise_search_tool.py +++ b/src/google/adk/tools/enterprise_search_tool.py @@ -52,7 +52,7 @@ async def process_llm_request( if is_gemini_model(llm_request.model): if is_gemini_1_model(llm_request.model) and llm_request.config.tools: raise ValueError( - 'Enterprise web search tool can not be used with other tools in' + 'Enterprise web search tool cannot be used with other tools in' ' Gemini 1.x.' ) llm_request.config = llm_request.config or types.GenerateContentConfig() diff --git a/src/google/adk/tools/function_tool.py b/src/google/adk/tools/function_tool.py index c3a4b348e9..b8b87e4ff1 100644 --- a/src/google/adk/tools/function_tool.py +++ b/src/google/adk/tools/function_tool.py @@ -171,7 +171,7 @@ async def run_async( # Before invoking the function, we check for if the list of args passed in # has all the mandatory arguments or not. # If the check fails, then we don't invoke the tool and let the Agent know - # that there was a missing a input parameter. This will basically help + # that there was a missing input parameter. This will basically help # the underlying model fix the issue and retry. mandatory_args = self._get_mandatory_args() missing_mandatory_args = [ @@ -224,7 +224,7 @@ async def _invoke_callable( # Functions are callable objects, but not all callable objects are functions # checking coroutine function is not enough. 
We also need to check whether - # Callable's __call__ function is a coroutine funciton + # Callable's __call__ function is a coroutine function is_async = inspect.iscoroutinefunction(target) or ( hasattr(target, '__call__') and inspect.iscoroutinefunction(target.__call__) diff --git a/src/google/adk/tools/google_search_tool.py b/src/google/adk/tools/google_search_tool.py index d974989209..8d73ced8d2 100644 --- a/src/google/adk/tools/google_search_tool.py +++ b/src/google/adk/tools/google_search_tool.py @@ -59,7 +59,7 @@ async def process_llm_request( if is_gemini_1_model(llm_request.model): if llm_request.config.tools: raise ValueError( - 'Google search tool can not be used with other tools in Gemini 1.x.' + 'Google search tool cannot be used with other tools in Gemini 1.x.' ) llm_request.config.tools.append( types.Tool(google_search_retrieval=types.GoogleSearchRetrieval()) diff --git a/src/google/adk/tools/google_tool.py b/src/google/adk/tools/google_tool.py index 9776fa0f57..68f11dd503 100644 --- a/src/google/adk/tools/google_tool.py +++ b/src/google/adk/tools/google_tool.py @@ -52,10 +52,10 @@ def __init__( """Initialize the Google API tool. Args: - func: callable that impelments the tool's logic, can accept one + func: callable that implements the tool's logic, can accept one 'credential" parameter credentials_config: credentials config used to call Google API. If None, - then we don't hanlde the auth logic + then we don't handle the auth logic tool_settings: Tool-specific settings. This settings should be provided by each toolset that uses this class to create customized tools. 
""" diff --git a/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py b/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py index 74166b00ee..38f4d7ecdb 100644 --- a/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py +++ b/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py @@ -268,9 +268,9 @@ async def prepare_auth_credentials( ) # here exchangers are doing two different thing: - # for service account the exchanger is doing actualy token exchange - # while for oauth2 it's actually doing the credentail conversion - # from OAuth2 credential to HTTP credentails for setting credential in + # for service account the exchanger is doing actual token exchange + # while for oauth2 it's actually doing the credential conversion + # from OAuth2 credential to HTTP credentials for setting credential in # http header # TODO cleanup the logic: # 1. service account token exchanger should happen before we store them in diff --git a/src/google/adk/tools/url_context_tool.py b/src/google/adk/tools/url_context_tool.py index 4d7c185b8e..10ce142bb1 100644 --- a/src/google/adk/tools/url_context_tool.py +++ b/src/google/adk/tools/url_context_tool.py @@ -49,7 +49,7 @@ async def process_llm_request( llm_request.config = llm_request.config or types.GenerateContentConfig() llm_request.config.tools = llm_request.config.tools or [] if is_gemini_1_model(llm_request.model): - raise ValueError('Url context tool can not be used in Gemini 1.x.') + raise ValueError('Url context tool cannot be used in Gemini 1.x.') elif is_gemini_2_or_above(llm_request.model): llm_request.config.tools.append( types.Tool(url_context=types.UrlContext()) diff --git a/src/google/adk/tools/vertex_ai_search_tool.py b/src/google/adk/tools/vertex_ai_search_tool.py index be51a7e860..aff5be1552 100644 --- a/src/google/adk/tools/vertex_ai_search_tool.py +++ b/src/google/adk/tools/vertex_ai_search_tool.py @@ -97,7 +97,7 @@ async def 
process_llm_request( if is_gemini_model(llm_request.model): if is_gemini_1_model(llm_request.model) and llm_request.config.tools: raise ValueError( - 'Vertex AI search tool can not be used with other tools in Gemini' + 'Vertex AI search tool cannot be used with other tools in Gemini' ' 1.x.' ) llm_request.config = llm_request.config or types.GenerateContentConfig() diff --git a/tests/integration/fixture/context_variable_agent/agent.py b/tests/integration/fixture/context_variable_agent/agent.py index cef56ccb1e..04e19314f9 100644 --- a/tests/integration/fixture/context_variable_agent/agent.py +++ b/tests/integration/fixture/context_variable_agent/agent.py @@ -43,7 +43,7 @@ def echo_info(customer_id: str) -> str: def build_global_instruction(invocation_context: InvocationContext) -> str: return ( - 'This is the gloabl agent instruction for invocation:' + 'This is the global agent instruction for invocation:' f' {invocation_context.invocation_id}.' ) diff --git a/tests/integration/fixture/flow_complex_spark/agent.py b/tests/integration/fixture/flow_complex_spark/agent.py index 18ce62ff8c..02fbfaebac 100644 --- a/tests/integration/fixture/flow_complex_spark/agent.py +++ b/tests/integration/fixture/flow_complex_spark/agent.py @@ -41,7 +41,7 @@ + Don't ask for clarifications from the user. + Do not ask the user for clarifications or if they have any other questions. + All headers should be bolded. -+ If you have steps in the plan that depend on other information, make sure they are 2 diferent sections in the plan. ++ If you have steps in the plan that depend on other information, make sure they are 2 different sections in the plan. + At the end mention that you will start researching. # Instruction on replying format @@ -68,7 +68,7 @@ # Instruction on replying format -Your reply should be a numbered lsit. +Your reply should be a numbered list. 
For each question, reply in the following format: "[question_generation_agent]: [generated questions]" @@ -92,7 +92,7 @@ " question." ), instruction="""\ -Inspect all the questions after "[question_generation_agent]: " and asnwer them. +Inspect all the questions after "[question_generation_agent]: " and answer them. # Instruction on replying format diff --git a/tests/integration/fixture/flow_complex_spark/sample.session.json b/tests/integration/fixture/flow_complex_spark/sample.session.json index 31575a84b4..ed3a200d3f 100644 --- a/tests/integration/fixture/flow_complex_spark/sample.session.json +++ b/tests/integration/fixture/flow_complex_spark/sample.session.json @@ -52,7 +52,7 @@ "response": { "status": "ok", "target_agent_name": "research_assistant", - "message": "Transfered to research_assistant" + "message": "Transferred to research_assistant" } } } @@ -165,7 +165,7 @@ "response": { "status": "ok", "target_agent_name": "spark_assistant", - "message": "Transfered to spark_assistant" + "message": "Transferred to spark_assistant" } } } diff --git a/tests/integration/fixture/tool_agent/agent.py b/tests/integration/fixture/tool_agent/agent.py index a89d20899e..2f914750a6 100644 --- a/tests/integration/fixture/tool_agent/agent.py +++ b/tests/integration/fixture/tool_agent/agent.py @@ -90,17 +90,17 @@ def complex_function_list_dict( raise ValueError("Wrong param") -def repetive_call_1(param: str): - return f"Call repetive_call_2 tool with param {param + '_repetive'}" +def repetitive_call_1(param: str): + return f"Call repetitive_call_2 tool with param {param + '_repetitive'}" -def repetive_call_2(param: str): +def repetitive_call_2(param: str): return param test_case_retrieval = FilesRetrieval( name="test_case_retrieval", - description="General guidence for agent test cases", + description="General guidance for agent test cases", input_dir=os.path.join(os.path.dirname(__file__), "files"), ) @@ -109,7 +109,7 @@ def repetive_call_2(param: str): rag_corpora=[ 
"projects/1096655024998/locations/us-central1/ragCorpora/4985766262475849728" ], - description="General guidence for agent test cases", + description="General guidance for agent test cases", ) invalid_rag_retrieval = VertexAiRagRetrieval( @@ -131,7 +131,7 @@ def repetive_call_2(param: str): shell_tool = LangchainTool(ShellTool()) docs_tool = CrewaiTool( - name="direcotry_read_tool", + name="directory_read_tool", description="use this to find files for you.", tool=DirectoryReadTool(directory="."), ) @@ -194,8 +194,8 @@ def repetive_call_2(param: str): list_str_param_function, return_list_str_function, # complex_function_list_dict, - repetive_call_1, - repetive_call_2, + repetitive_call_1, + repetitive_call_2, test_case_retrieval, valid_rag_retrieval, invalid_rag_retrieval, diff --git a/tests/integration/fixture/trip_planner_agent/agent.py b/tests/integration/fixture/trip_planner_agent/agent.py index ea8a33ab46..5c4a9f2988 100644 --- a/tests/integration/fixture/trip_planner_agent/agent.py +++ b/tests/integration/fixture/trip_planner_agent/agent.py @@ -105,6 +105,6 @@ instruction=""" Your goal is to plan the best trip according to information listed above. 
You describe why did you choose the city, list top 3 - attactions and provide a detailed itinerary for each day.""", + attractions and provide a detailed itinerary for each day.""", sub_agents=[identify_agent, gather_agent, plan_agent], ) diff --git a/tests/integration/models/test_litellm_with_function.py b/tests/integration/models/test_litellm_with_function.py index e4ac787e7b..b06c8f826c 100644 --- a/tests/integration/models/test_litellm_with_function.py +++ b/tests/integration/models/test_litellm_with_function.py @@ -83,7 +83,7 @@ def llm_request(): @pytest.mark.asyncio -async def test_generate_content_asyn_with_function( +async def test_generate_content_async_with_function( oss_llm_with_function, llm_request ): responses = [ @@ -98,7 +98,7 @@ async def test_generate_content_asyn_with_function( @pytest.mark.asyncio -async def test_generate_content_asyn_stream_with_function( +async def test_generate_content_async_stream_with_function( oss_llm_with_function, llm_request ): responses = [ diff --git a/tests/integration/test_evalute_agent_in_fixture.py b/tests/integration/test_evalute_agent_in_fixture.py index 344ba0994b..bd09549eee 100644 --- a/tests/integration/test_evalute_agent_in_fixture.py +++ b/tests/integration/test_evalute_agent_in_fixture.py @@ -64,8 +64,8 @@ async def test_evaluate_agents_long_running_4_runs_per_eval_item( await AgentEvaluator.evaluate( agent_module=agent_name, eval_dataset_file_path_or_dir=evalfile, - # Using a slightly higher value helps us manange the variances that may + # Using a slightly higher value helps us manage the variances that may # happen in each eval. - # This, of course, comes at a cost of incrased test run times. + # This, of course, comes at a cost of increased test run times. 
num_runs=4, ) diff --git a/tests/integration/test_tools.py b/tests/integration/test_tools.py index 39662484ec..a9f99791bc 100644 --- a/tests/integration/test_tools.py +++ b/tests/integration/test_tools.py @@ -106,12 +106,12 @@ def test_complex_function_calls_success(agent_runner: TestRunner): [{"agent": tool_agent.agent.root_agent}], indirect=True, ) -def test_repetive_call_success(agent_runner: TestRunner): +def test_repetitive_call_success(agent_runner: TestRunner): _call_function_and_assert( agent_runner, - "repetive_call_1", + "repetitive_call_1", "test", - "test_repetive", + "test_repetitive", ) diff --git a/tests/unittests/a2a/converters/test_request_converter.py b/tests/unittests/a2a/converters/test_request_converter.py index 3f4b8d853c..b56e3ccb1a 100644 --- a/tests/unittests/a2a/converters/test_request_converter.py +++ b/tests/unittests/a2a/converters/test_request_converter.py @@ -368,7 +368,7 @@ def test_end_to_end_conversion_with_fallback_user(self): assert result is not None assert ( result.user_id == "A2A_USER_test_session_456" - ) # Should fallback to context ID + ) # Should fall back to context ID assert result.session_id == "test_session_456" assert isinstance(result.new_message, genai_types.Content) assert result.new_message.role == "user" diff --git a/tests/unittests/a2a/utils/test_agent_card_builder.py b/tests/unittests/a2a/utils/test_agent_card_builder.py index 64414730db..e0b62468e5 100644 --- a/tests/unittests/a2a/utils/test_agent_card_builder.py +++ b/tests/unittests/a2a/utils/test_agent_card_builder.py @@ -331,7 +331,7 @@ def test_replace_pronouns_basic(self): assert result == "I should do my work and it will be mine." def test_replace_pronouns_case_insensitive(self): - """Test _replace_pronouns with case insensitive matching.""" + """Test _replace_pronouns with case-insensitive matching.""" # Arrange text = "YOU should do YOUR work and it will be YOURS." 
@@ -1073,7 +1073,7 @@ def test_extract_examples_from_instruction_with_different_patterns(self): assert result is None def test_extract_examples_from_instruction_case_insensitive(self): - """Test _extract_examples_from_instruction with case insensitive matching.""" + """Test _extract_examples_from_instruction with case-insensitive matching.""" # Arrange instruction = ( 'example query: "What is the weather?" example response: "The weather' diff --git a/tests/unittests/agents/test_agent_clone.py b/tests/unittests/agents/test_agent_clone.py index 3091454b39..0a3d0a65f4 100644 --- a/tests/unittests/agents/test_agent_clone.py +++ b/tests/unittests/agents/test_agent_clone.py @@ -274,7 +274,7 @@ def test_clone_invalid_field(): """Test that cloning with invalid fields raises an error.""" original = LlmAgent(name="test_agent", description="Test agent") - with pytest.raises(ValueError, match="Cannot update non-existent fields"): + with pytest.raises(ValueError, match="Cannot update nonexistent fields"): original.clone(update={"invalid_field": "value"}) diff --git a/tests/unittests/agents/test_remote_a2a_agent.py b/tests/unittests/agents/test_remote_a2a_agent.py index 65359dc840..fc93e8dd5d 100644 --- a/tests/unittests/agents/test_remote_a2a_agent.py +++ b/tests/unittests/agents/test_remote_a2a_agent.py @@ -360,7 +360,7 @@ async def test_resolve_agent_card_from_file_success(self): @pytest.mark.asyncio async def test_resolve_agent_card_from_file_not_found(self): - """Test agent card resolution from non-existent file raises error.""" + """Test agent card resolution from nonexistent file raises error.""" agent = RemoteA2aAgent( name="test_agent", agent_card="/path/to/nonexistent.json" ) diff --git a/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py b/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py index 66b8582322..32c4812c2f 100644 --- a/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py +++ 
b/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py @@ -126,7 +126,7 @@ def test_get_exchanger_returns_correct_instance(self): assert isinstance(retrieved_exchanger, BaseCredentialExchanger) def test_get_exchanger_nonexistent_type_returns_none(self): - """Test that get_exchanger returns None for non-existent credential types.""" + """Test that get_exchanger returns None for nonexistent credential types.""" registry = CredentialExchangerRegistry() # Try to get an exchanger that was never registered diff --git a/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py b/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py index bc5990c677..1156bead70 100644 --- a/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py +++ b/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py @@ -24,7 +24,7 @@ from google.adk.auth.auth_credential import AuthCredentialTypes from google.adk.auth.auth_credential import OAuth2Auth from google.adk.auth.auth_schemes import OpenIdConnectWithConfig -from google.adk.auth.exchanger.base_credential_exchanger import CredentialExchangError +from google.adk.auth.exchanger.base_credential_exchanger import CredentialExchangeError from google.adk.auth.exchanger.oauth2_credential_exchanger import OAuth2CredentialExchanger import pytest @@ -117,7 +117,7 @@ async def test_exchange_missing_auth_scheme(self): try: await exchanger.exchange(credential, None) assert False, "Should have raised ValueError" - except CredentialExchangError as e: + except CredentialExchangeError as e: assert "auth_scheme is required" in str(e) @patch("google.adk.auth.oauth2_credential_util.OAuth2Session") diff --git a/tests/unittests/auth/test_auth_handler.py b/tests/unittests/auth/test_auth_handler.py index 0a2a2f7802..b1ef070667 100644 --- a/tests/unittests/auth/test_auth_handler.py +++ b/tests/unittests/auth/test_auth_handler.py @@ -419,7 +419,7 @@ def test_get_auth_response_exists( assert result == 
oauth2_credentials_with_auth_uri def test_get_auth_response_not_exists(self, auth_config): - """Test retrieving a non-existent auth response from state.""" + """Test retrieving a nonexistent auth response from state.""" handler = AuthHandler(auth_config) state = MockState() diff --git a/tests/unittests/cli/utils/test_agent_loader.py b/tests/unittests/cli/utils/test_agent_loader.py index 4e6e254ec0..c314329631 100644 --- a/tests/unittests/cli/utils/test_agent_loader.py +++ b/tests/unittests/cli/utils/test_agent_loader.py @@ -286,7 +286,7 @@ def test_agent_not_found_error(self): loader = AgentLoader(temp_dir) agents_dir = temp_dir # For use in the expected message string - # Try to load non-existent agent + # Try to load nonexistent agent with pytest.raises(ValueError) as exc_info: loader.load_agent("nonexistent_agent") @@ -328,12 +328,12 @@ def __init__(self): assert "No root_agent found for 'broken_agent'" in str(exc_info.value) def test_agent_internal_module_not_found_error(self): - """Test error when an agent tries to import a non-existent module.""" + """Test error when an agent tries to import a nonexistent module.""" with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) agent_name = "importer_agent" - # Create agent that imports a non-existent module + # Create agent that imports a nonexistent module agent_file = temp_path / f"{agent_name}.py" agent_file.write_text(dedent(f""" from google.adk.agents.base_agent import BaseAgent @@ -526,7 +526,7 @@ def test_yaml_agent_not_found_error(self): loader = AgentLoader(temp_dir) agents_dir = temp_dir # For use in the expected message string - # Try to load non-existent YAML agent + # Try to load nonexistent YAML agent with pytest.raises(ValueError) as exc_info: loader.load_agent("nonexistent_yaml_agent") diff --git a/tests/unittests/code_executors/test_code_executor_context.py b/tests/unittests/code_executors/test_code_executor_context.py index 5f3a237d34..6a85b7a81a 100644 --- 
a/tests/unittests/code_executors/test_code_executor_context.py +++ b/tests/unittests/code_executors/test_code_executor_context.py @@ -26,7 +26,7 @@ def empty_state() -> State: @pytest.fixture def context_with_data() -> CodeExecutorContext: - """Fixture for a CodeExecutorContext with some pre-populated data.""" + """Fixture for a CodeExecutorContext with some prepopulated data.""" state_data = { "_code_execution_context": { "execution_session_id": "session123", diff --git a/tests/unittests/evaluation/test_gcs_eval_sets_manager.py b/tests/unittests/evaluation/test_gcs_eval_sets_manager.py index 8cb7b7ecb3..1f26148727 100644 --- a/tests/unittests/evaluation/test_gcs_eval_sets_manager.py +++ b/tests/unittests/evaluation/test_gcs_eval_sets_manager.py @@ -101,7 +101,7 @@ def test_gcs_eval_sets_manager_create_eval_set_invalid_id( app_name = "test_app" eval_set_id = "invalid-id" - with pytest.raises(ValueError, match="Invalid Eval Set Id"): + with pytest.raises(ValueError, match="Invalid Eval Set ID"): gcs_eval_sets_manager.create_eval_set(app_name, eval_set_id) def test_gcs_eval_sets_manager_list_eval_sets_success( diff --git a/tests/unittests/evaluation/test_local_eval_service.py b/tests/unittests/evaluation/test_local_eval_service.py index df90bf39dc..cf2ca342f3 100644 --- a/tests/unittests/evaluation/test_local_eval_service.py +++ b/tests/unittests/evaluation/test_local_eval_service.py @@ -527,7 +527,7 @@ def test_generate_final_eval_status_doesn_t_throw_on(eval_service): # eval case. # We go over all the possible values of EvalStatus one by one and expect - # the _generate_final_eval_status to handle it without throwing an exeception. + # the _generate_final_eval_status to handle it without throwing an exception. 
for status in EvalStatus: eval_metric_result = EvalMetricResult( metric_name="metric1", threshold=0.5, eval_status=status diff --git a/tests/unittests/evaluation/test_local_eval_sets_manager.py b/tests/unittests/evaluation/test_local_eval_sets_manager.py index 08a5ee9d3f..fd31a9e5fd 100644 --- a/tests/unittests/evaluation/test_local_eval_sets_manager.py +++ b/tests/unittests/evaluation/test_local_eval_sets_manager.py @@ -24,7 +24,7 @@ from google.adk.evaluation.eval_case import Invocation from google.adk.evaluation.eval_set import EvalSet from google.adk.evaluation.local_eval_sets_manager import _EVAL_SET_FILE_EXTENSION -from google.adk.evaluation.local_eval_sets_manager import convert_eval_set_to_pydanctic_schema +from google.adk.evaluation.local_eval_sets_manager import convert_eval_set_to_pydantic_schema from google.adk.evaluation.local_eval_sets_manager import load_eval_set_from_file from google.adk.evaluation.local_eval_sets_manager import LocalEvalSetsManager from google.genai import types as genai_types @@ -32,10 +32,10 @@ import pytest -class TestConvertEvalSetToPydancticSchema: - """Tests convert_eval_set_to_pydanctic_schema method.""" +class TestConvertEvalSetToPydanticSchema: + """Tests convert_eval_set_to_pydantic_schema method.""" - def test_convert_eval_set_to_pydanctic_schema_complete(self): + def test_convert_eval_set_to_pydantic_schema_complete(self): eval_set_id = "test_eval_set" eval_set_in_json_format = [{ "name": "roll_17_sided_dice_twice", @@ -71,7 +71,7 @@ def test_convert_eval_set_to_pydanctic_schema_complete(self): }, }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) @@ -93,14 +93,14 @@ def test_convert_eval_set_to_pydanctic_schema_complete(self): == 1 ) - def test_convert_eval_set_to_pydanctic_schema_minimal(self): + def test_convert_eval_set_to_pydantic_schema_minimal(self): eval_set_id = "test_eval_set" eval_set_in_json_format = [{ "name": 
"minimal_case", "data": [{"query": "Hello", "reference": "World"}], }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) @@ -117,7 +117,7 @@ def test_convert_eval_set_to_pydanctic_schema_minimal(self): == "World" ) - def test_convert_eval_set_to_pydanctic_schema_empty_tool_use_and_intermediate_responses( + def test_convert_eval_set_to_pydantic_schema_empty_tool_use_and_intermediate_responses( self, ): eval_set_id = "test_eval_set" @@ -131,7 +131,7 @@ def test_convert_eval_set_to_pydanctic_schema_empty_tool_use_and_intermediate_re }], }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) @@ -150,7 +150,7 @@ def test_convert_eval_set_to_pydanctic_schema_empty_tool_use_and_intermediate_re == 0 ) - def test_convert_eval_set_to_pydanctic_schema_empty_initial_session(self): + def test_convert_eval_set_to_pydantic_schema_empty_initial_session(self): eval_set_id = "test_eval_set" eval_set_in_json_format = [{ "name": "empty_session", @@ -158,14 +158,14 @@ def test_convert_eval_set_to_pydanctic_schema_empty_initial_session(self): "initial_session": {}, }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) assert eval_set.eval_set_id == eval_set_id assert eval_set.eval_cases[0].session_input is None - def test_convert_eval_set_to_pydanctic_schema_invalid_data(self): + def test_convert_eval_set_to_pydantic_schema_invalid_data(self): # This test implicitly checks for potential validation errors during Pydantic # object creation eval_set_id = "test_eval_set" @@ -190,7 +190,7 @@ def test_convert_eval_set_to_pydanctic_schema_invalid_data(self): }] with pytest.raises(ValidationError): - convert_eval_set_to_pydanctic_schema(eval_set_id, eval_set_in_json_format) + convert_eval_set_to_pydantic_schema(eval_set_id, 
eval_set_in_json_format) class TestLoadEvalSetFromFile: @@ -300,14 +300,14 @@ def test_load_eval_set_from_file_invalid_json(self, tmp_path): def test_load_eval_set_from_file_invalid_data(self, tmp_path, mocker): # Create a dummy file with invalid data that fails both Pydantic validation # and the old format conversion. We mock the - # convert_eval_set_to_pydanctic_schema function to raise a ValueError + # convert_eval_set_to_pydantic_schema function to raise a ValueError # so that we can assert that the exception is raised. file_path = tmp_path / "invalid_data.json" with open(file_path, "w", encoding="utf-8") as f: f.write('{"invalid": "data"}') mocker.patch( - "google.adk.evaluation.local_eval_sets_manager.convert_eval_set_to_pydanctic_schema", + "google.adk.evaluation.local_eval_sets_manager.convert_eval_set_to_pydantic_schema", side_effect=ValueError(), ) @@ -392,7 +392,7 @@ def test_local_eval_sets_manager_create_eval_set_invalid_id( app_name = "test_app" eval_set_id = "invalid-id" - with pytest.raises(ValueError, match="Invalid Eval Set Id"): + with pytest.raises(ValueError, match="Invalid Eval Set ID"): local_eval_sets_manager.create_eval_set(app_name, eval_set_id) def test_local_eval_sets_manager_create_eval_set_already_exists( diff --git a/tests/unittests/flows/llm_flows/test_agent_transfer.py b/tests/unittests/flows/llm_flows/test_agent_transfer.py index f7d17d65bd..19225ce793 100644 --- a/tests/unittests/flows/llm_flows/test_agent_transfer.py +++ b/tests/unittests/flows/llm_flows/test_agent_transfer.py @@ -182,7 +182,7 @@ def test_auto_to_auto_to_single(is_resumable: bool): ] # sub_agent_1 should still be the current agent. sub_agent_1_1 is single so - # it should not be the current agent, otherwise the conversation will be + # it should not be the current agent; otherwise, the conversation will be # tied to sub_agent_1_1 forever. 
assert testing_utils.simplify_events(runner.run('test2')) == [ ('sub_agent_1', 'response2'), diff --git a/tests/unittests/flows/llm_flows/test_functions_sequential.py b/tests/unittests/flows/llm_flows/test_functions_sequential.py index a88d90f3d1..5ae073c615 100644 --- a/tests/unittests/flows/llm_flows/test_functions_sequential.py +++ b/tests/unittests/flows/llm_flows/test_functions_sequential.py @@ -64,13 +64,13 @@ def increase_by_one(x: int) -> int: assert testing_utils.simplify_contents(mockModel.requests[0].contents) == [ ('user', 'test') ] - # 3 items: user content, functaion call / response for the 1st call + # 3 items: user content, function call / response for the 1st call assert testing_utils.simplify_contents(mockModel.requests[1].contents) == [ ('user', 'test'), ('model', function_call({'x': 1})), ('user', function_response({'result': 2})), ] - # 5 items: user content, functaion call / response for two calls + # 5 items: user content, function call / response for two calls assert testing_utils.simplify_contents(mockModel.requests[2].contents) == [ ('user', 'test'), ('model', function_call({'x': 1})), @@ -78,7 +78,7 @@ def increase_by_one(x: int) -> int: ('model', function_call({'x': 2})), ('user', function_response({'result': 3})), ] - # 7 items: user content, functaion call / response for three calls + # 7 items: user content, function call / response for three calls assert testing_utils.simplify_contents(mockModel.requests[3].contents) == [ ('user', 'test'), ('model', function_call({'x': 1})), diff --git a/tests/unittests/flows/llm_flows/test_functions_simple.py b/tests/unittests/flows/llm_flows/test_functions_simple.py index b8599486b1..9fa1151387 100644 --- a/tests/unittests/flows/llm_flows/test_functions_simple.py +++ b/tests/unittests/flows/llm_flows/test_functions_simple.py @@ -32,7 +32,7 @@ def test_simple_function(): function_call_1 = types.Part.from_function_call( name='increase_by_one', args={'x': 1} ) - function_respones_2 = 
types.Part.from_function_response( + function_responses_2 = types.Part.from_function_response( name='increase_by_one', response={'result': 2} ) responses: list[types.Content] = [ @@ -54,7 +54,7 @@ def increase_by_one(x: int) -> int: runner = testing_utils.InMemoryRunner(agent) assert testing_utils.simplify_events(runner.run('test')) == [ ('root_agent', function_call_1), - ('root_agent', function_respones_2), + ('root_agent', function_responses_2), ('root_agent', 'response1'), ] @@ -65,7 +65,7 @@ def increase_by_one(x: int) -> int: assert testing_utils.simplify_contents(mock_model.requests[1].contents) == [ ('user', 'test'), ('model', function_call_1), - ('user', function_respones_2), + ('user', function_responses_2), ] # Asserts the function calls. diff --git a/tests/unittests/tools/bigquery/test_bigquery_query_tool.py b/tests/unittests/tools/bigquery/test_bigquery_query_tool.py index 5c8b921234..c69af8dee0 100644 --- a/tests/unittests/tools/bigquery/test_bigquery_query_tool.py +++ b/tests/unittests/tools/bigquery/test_bigquery_query_tool.py @@ -875,7 +875,7 @@ def test_execute_sql_non_select_stmt_write_protected_persistent_target( ): """Test execute_sql tool for non-SELECT query when writes are protected. - This is a special case when the destination table is a persistent/permananent + This is a special case when the destination table is a persistent/permanent one and the protected write is enabled. In this case the operation should fail. """ diff --git a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml index 0cea00362c..1cc139a66c 100644 --- a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml +++ b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml @@ -871,7 +871,7 @@ components: type: string iCalUID: description: |- - Event unique identifier as defined in RFC5545. 
It is used to uniquely identify events accross calendaring systems and must be supplied when importing events via the import method. + Event unique identifier as defined in RFC5545. It is used to uniquely identify events across calendaring systems and must be supplied when importing events via the import method. Note that the iCalUID and the id are not identical and only one of them should be supplied at event creation time. One difference in their semantics is that in recurring events, all occurrences of one event have different ids while they all share the same iCalUIDs. To retrieve an event using its iCalUID, call the events.list method using the iCalUID parameter. To retrieve an event using its id, call the events.get method. type: string id: diff --git a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py index e405ce5b88..16b0d3b848 100644 --- a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py +++ b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py @@ -150,11 +150,11 @@ async def test_openid_connect_with_auth_response( tool_context = create_mock_tool_context() mock_auth_handler = MagicMock() - returned_credentail = AuthCredential( + returned_credential = AuthCredential( auth_type=AuthCredentialTypes.OPEN_ID_CONNECT, oauth2=OAuth2Auth(auth_response_uri='test_auth_response_uri'), ) - mock_auth_handler.get_auth_response.return_value = returned_credentail + mock_auth_handler.get_auth_response.return_value = returned_credential mock_auth_handler_path = 'google.adk.tools.tool_context.AuthHandler' monkeypatch.setattr( mock_auth_handler_path, lambda *args, **kwargs: mock_auth_handler @@ -176,7 +176,7 @@ async def test_openid_connect_with_auth_response( stored_credential = credential_store.get_credential( openid_connect_scheme, openid_connect_credential ) - assert stored_credential == 
returned_credentail + assert stored_credential == returned_credential mock_auth_handler.get_auth_response.assert_called_once() diff --git a/tests/unittests/tools/test_build_function_declaration.py b/tests/unittests/tools/test_build_function_declaration.py index 8be1f86520..f57c3d3838 100644 --- a/tests/unittests/tools/test_build_function_declaration.py +++ b/tests/unittests/tools/test_build_function_declaration.py @@ -390,7 +390,7 @@ def function_string_return(param: str) -> str: assert function_decl.response.type == types.Type.STRING -def test_fucntion_with_no_response_annotations(): +def test_function_with_no_response_annotations(): """Test a function that has no response annotations.""" def transfer_to_agent(agent_name: str, tool_context: ToolContext): diff --git a/tests/unittests/tools/test_enterprise_web_search_tool.py b/tests/unittests/tools/test_enterprise_web_search_tool.py index 390da4a78b..9eabcf0bab 100644 --- a/tests/unittests/tools/test_enterprise_web_search_tool.py +++ b/tests/unittests/tools/test_enterprise_web_search_tool.py @@ -93,6 +93,4 @@ async def test_process_llm_request_failure_with_multiple_tools_gemini_1_models() await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request ) - assert 'can not be used with other tools in Gemini 1.x.' in str( - exc_info.value - ) + assert 'cannot be used with other tools in Gemini 1.x.' 
in str(exc_info.value) diff --git a/tests/unittests/tools/test_function_tool.py b/tests/unittests/tools/test_function_tool.py index 38758e8d2c..78610d330d 100644 --- a/tests/unittests/tools/test_function_tool.py +++ b/tests/unittests/tools/test_function_tool.py @@ -39,14 +39,14 @@ def function_for_testing_with_no_args(): async def async_function_for_testing_with_1_arg_and_tool_context( arg1, tool_context ): - """Async function for testing with 1 arge and tool context.""" + """Async function for testing with 1 arg and tool context.""" assert arg1 assert tool_context return arg1 async def async_function_for_testing_with_2_arg_and_no_tool_context(arg1, arg2): - """Async function for testing with 2 arge and no tool context.""" + """Async function for testing with 2 args and no tool context.""" assert arg1 assert arg2 return arg1 @@ -65,7 +65,7 @@ async def __call__(self, arg1, arg2): def function_for_testing_with_1_arg_and_tool_context(arg1, tool_context): - """Function for testing with 1 arge and tool context.""" + """Function for testing with 1 arg and tool context.""" assert arg1 assert tool_context return arg1 @@ -81,7 +81,7 @@ async def __call__(self, arg1, tool_context): def function_for_testing_with_2_arg_and_no_tool_context(arg1, arg2): - """Function for testing with 2 arge and no tool context.""" + """Function for testing with 2 args and no tool context.""" assert arg1 assert arg2 return arg1 @@ -283,7 +283,7 @@ async def test_run_async_missing_all_arg_async_func(): @pytest.mark.asyncio async def test_run_async_with_optional_args_not_set_sync_func(): - """Test that run_async calls the function for sync funciton with optional args not set.""" + """Test that run_async calls the function for sync function with optional args not set.""" def func_with_optional_args(arg1, arg2=None, *, arg3, arg4=None, **kwargs): return f"{arg1},{arg3}" @@ -296,7 +296,7 @@ def func_with_optional_args(arg1, arg2=None, *, arg3, arg4=None, **kwargs): @pytest.mark.asyncio async def 
test_run_async_with_optional_args_not_set_async_func(): - """Test that run_async calls the function for async funciton with optional args not set.""" + """Test that run_async calls the function for async function with optional args not set.""" async def async_func_with_optional_args( arg1, arg2=None, *, arg3, arg4=None, **kwargs diff --git a/tests/unittests/tools/test_gemini_schema_util.py b/tests/unittests/tools/test_gemini_schema_util.py index ffbc80b819..ff38f07ae2 100644 --- a/tests/unittests/tools/test_gemini_schema_util.py +++ b/tests/unittests/tools/test_gemini_schema_util.py @@ -200,7 +200,7 @@ def test_to_gemini_schema_nested_dict(self): }, } gemini_schema = _to_gemini_schema(openapi_schema) - # Since metadata is not properties nor item, it will call to_gemini_schema recursively. + # Since metadata is neither properties nor item, it will call to_gemini_schema recursively. assert isinstance(gemini_schema.properties["metadata"], Schema) assert ( gemini_schema.properties["metadata"].type == Type.OBJECT @@ -544,7 +544,7 @@ def test_sanitize_schema_formats_for_gemini_nullable(self): "properties": { "case_id": { "description": "The ID of the case.", - "title": "Case Id", + "title": "Case ID", "type": "string", }, "next_page_token": { @@ -567,7 +567,7 @@ def test_sanitize_schema_formats_for_gemini_nullable(self): "properties": { "case_id": { "description": "The ID of the case.", - "title": "Case Id", + "title": "Case ID", "type": "string", }, "next_page_token": { diff --git a/tests/unittests/tools/test_google_search_tool.py b/tests/unittests/tools/test_google_search_tool.py index 9623875aaa..2f090abb17 100644 --- a/tests/unittests/tools/test_google_search_tool.py +++ b/tests/unittests/tools/test_google_search_tool.py @@ -186,7 +186,7 @@ async def test_process_llm_request_with_gemini_1_model_and_existing_tools_raises with pytest.raises( ValueError, match=( - 'Google search tool can not be used with other tools in Gemini 1.x' + 'Google search tool cannot be used 
with other tools in Gemini 1.x' ), ): await tool.process_llm_request( @@ -215,7 +215,7 @@ async def test_process_llm_request_with_path_based_gemini_1_model_and_existing_t with pytest.raises( ValueError, match=( - 'Google search tool can not be used with other tools in Gemini 1.x' + 'Google search tool cannot be used with other tools in Gemini 1.x' ), ): await tool.process_llm_request( diff --git a/tests/unittests/tools/test_url_context_tool.py b/tests/unittests/tools/test_url_context_tool.py index cbbbb0c9a1..eaa7391593 100644 --- a/tests/unittests/tools/test_url_context_tool.py +++ b/tests/unittests/tools/test_url_context_tool.py @@ -146,7 +146,7 @@ async def test_process_llm_request_with_gemini_1_model_raises_error(self): ) with pytest.raises( - ValueError, match='Url context tool can not be used in Gemini 1.x' + ValueError, match='Url context tool cannot be used in Gemini 1.x' ): await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request @@ -166,7 +166,7 @@ async def test_process_llm_request_with_path_based_gemini_1_model_raises_error( ) with pytest.raises( - ValueError, match='Url context tool can not be used in Gemini 1.x' + ValueError, match='Url context tool cannot be used in Gemini 1.x' ): await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request diff --git a/tests/unittests/tools/test_vertex_ai_search_tool.py b/tests/unittests/tools/test_vertex_ai_search_tool.py index 12ee2f60f4..0df19288a3 100644 --- a/tests/unittests/tools/test_vertex_ai_search_tool.py +++ b/tests/unittests/tools/test_vertex_ai_search_tool.py @@ -207,7 +207,7 @@ async def test_process_llm_request_with_gemini_1_and_other_tools_raises_error( with pytest.raises( ValueError, match=( - 'Vertex AI search tool can not be used with other tools in' + 'Vertex AI search tool cannot be used with other tools in' ' Gemini 1.x' ), ): @@ -237,7 +237,7 @@ async def test_process_llm_request_with_path_based_gemini_1_and_other_tools_rais with pytest.raises( 
ValueError, match=( - 'Vertex AI search tool can not be used with other tools in' + 'Vertex AI search tool cannot be used with other tools in' ' Gemini 1.x' ), ): diff --git a/tests/unittests/utils/test_model_name_utils.py b/tests/unittests/utils/test_model_name_utils.py index 2e3b70a92b..127589d4a6 100644 --- a/tests/unittests/utils/test_model_name_utils.py +++ b/tests/unittests/utils/test_model_name_utils.py @@ -120,7 +120,7 @@ def test_is_gemini_model_edge_cases(self): assert is_gemini_model('gemini_1_5_flash') is False def test_is_gemini_model_case_sensitivity(self): - """Test that model detection is case sensitive.""" + """Test that model detection is case-sensitive.""" assert is_gemini_model('Gemini-2.5-pro') is False assert is_gemini_model('GEMINI-2.5-pro') is False assert is_gemini_model('gemini-2.5-PRO') is True # Only the start matters