diff --git a/.release-please-manifest.json b/.release-please-manifest.json index dbc9e2fa5a3..6da66fac38c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,4 +1,5 @@ { + "handwritten/cloud-profiler": "6.0.3", "packages/gapic-node-processing": "0.1.6", "packages/google-ads-admanager": "0.5.0", "packages/google-ads-datamanager": "0.1.0", @@ -23,6 +24,7 @@ "packages/google-cloud-apigeeconnect": "4.2.1", "packages/google-cloud-apigeeregistry": "2.2.1", "packages/google-cloud-apihub": "0.5.1", + "packages/google-cloud-apiregistry": "0.1.0", "packages/google-cloud-apphub": "0.6.1", "packages/google-cloud-asset": "6.3.1", "packages/google-cloud-assuredworkloads": "5.1.1", @@ -92,6 +94,7 @@ "packages/google-cloud-gkeconnect-gateway": "5.2.1", "packages/google-cloud-gkehub": "6.3.1", "packages/google-cloud-gkemulticloud": "2.3.0", + "packages/google-cloud-gkerecommender": "0.1.0", "packages/google-cloud-gsuiteaddons": "2.2.1", "packages/google-cloud-hypercomputecluster": "0.1.0", "packages/google-cloud-iap": "4.3.1", @@ -145,11 +148,11 @@ "packages/google-cloud-saasplatform-saasservicemgmt": "0.1.1", "packages/google-cloud-scheduler": "5.3.1", "packages/google-cloud-secretmanager": "6.1.1", + "packages/google-cloud-securesourcemanager": "0.8.1", "packages/google-cloud-security-privateca": "7.0.1", "packages/google-cloud-security-publicca": "2.2.1", "packages/google-cloud-securitycenter": "9.2.1", "packages/google-cloud-securitycentermanagement": "0.7.1", - "packages/google-cloud-securesourcemanager": "0.8.1", "packages/google-cloud-servicedirectory": "6.1.1", "packages/google-cloud-servicehealth": "0.7.1", "packages/google-cloud-shell": "4.1.1", @@ -216,7 +219,5 @@ "packages/google-storagetransfer": "4.2.1", "packages/google-streetview-publish": "0.4.1", "packages/grafeas": "6.1.1", - "packages/typeless-sample-bot": "3.1.1", - "packages/google-cloud-apiregistry": "0.1.0", - "packages/google-cloud-gkerecommender": "0.1.0" -} \ No newline at end of file + "packages/typeless-sample-bot": "3.1.1" +} diff --git a/handwritten/cloud-profiler/.OwlBot.yaml b/handwritten/cloud-profiler/.OwlBot.yaml new file mode 100644 index 00000000000..46bbeee9f9f --- /dev/null +++ b/handwritten/cloud-profiler/.OwlBot.yaml @@ -0,0 +1,17 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +begin-after-commit-hash: 397c0bfd367a2427104f988d5329bc117caafd95 + diff --git a/handwritten/cloud-profiler/.editorconfig b/handwritten/cloud-profiler/.editorconfig new file mode 100644 index 00000000000..62e0fdb64aa --- /dev/null +++ b/handwritten/cloud-profiler/.editorconfig @@ -0,0 +1,14 @@ +; http://editorconfig.org + +root = true + +[*] +indent_style = space +indent_size = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +indent_size = 4 diff --git a/handwritten/cloud-profiler/.eslintignore b/handwritten/cloud-profiler/.eslintignore new file mode 100644 index 00000000000..c4a0963e9bd --- /dev/null +++ b/handwritten/cloud-profiler/.eslintignore @@ -0,0 +1,8 @@ +**/node_modules +**/coverage +test/fixtures +build/ +docs/ +protos/ +samples/generated/ +system-test/**/fixtures diff --git a/handwritten/cloud-profiler/.eslintrc.json b/handwritten/cloud-profiler/.eslintrc.json new file mode 100644 index 00000000000..78215349546 --- /dev/null +++ b/handwritten/cloud-profiler/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/handwritten/cloud-profiler/.gitattributes b/handwritten/cloud-profiler/.gitattributes new file mode 100644 index 00000000000..33739cb74e4 --- /dev/null +++ b/handwritten/cloud-profiler/.gitattributes @@ -0,0 +1,4 @@ +*.ts text eol=lf +*.js text eol=lf +protos/* linguist-generated +**/api-extractor.json linguist-language=JSON-with-Comments diff --git a/handwritten/cloud-profiler/.github/.OwlBot.lock.yaml b/handwritten/cloud-profiler/.github/.OwlBot.lock.yaml new file mode 100644 index 00000000000..24943e1161e --- /dev/null +++ b/handwritten/cloud-profiler/.github/.OwlBot.lock.yaml @@ -0,0 +1,17 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +docker: + image: gcr.io/cloud-devrel-public-resources/owlbot-nodejs:latest + digest: sha256:609822e3c09b7a1bd90b99655904609f162cc15acb4704f1edf778284c36f429 +# created: 2024-10-01T19:34:30.797530443Z diff --git a/handwritten/cloud-profiler/.github/CODEOWNERS b/handwritten/cloud-profiler/.github/CODEOWNERS new file mode 100644 index 00000000000..b4bc3846d5c --- /dev/null +++ b/handwritten/cloud-profiler/.github/CODEOWNERS @@ -0,0 +1,9 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. +# +# For syntax help see: +# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax + + +# Unless specified, the jsteam is the default owner for nodejs repositories. 
+* @googleapis/api-profiler @googleapis/jsteam \ No newline at end of file diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/bug_report.yml b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000000..12c2910364f --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,99 @@ +name: Bug Report +description: Create a report to help us improve +labels: + - bug +body: + - type: markdown + attributes: + value: > + **PLEASE READ**: If you have a support contract with Google, please + create an issue in the [support + console](https://cloud.google.com/support/) instead of filing on GitHub. + This will ensure a timely response. Otherwise, please make sure to + follow the steps below. + - type: checkboxes + attributes: + label: Please make sure you have searched for information in the following + guides. + options: + - label: "Search the issues already opened: + https://github.com/GoogleCloudPlatform/google-cloud-node/issues" + required: true + - label: "Search StackOverflow: + http://stackoverflow.com/questions/tagged/google-cloud-platform+nod\ + e.js" + required: true + - label: "Check our Troubleshooting guide: + https://github.com/googleapis/google-cloud-node/blob/main/docs/trou\ + bleshooting.md" + required: true + - label: "Check our FAQ: + https://github.com/googleapis/google-cloud-node/blob/main/docs/faq.\ + md" + required: true + - label: "Check our libraries HOW-TO: + https://github.com/googleapis/gax-nodejs/blob/main/client-libraries\ + .md" + required: true + - label: "Check out our authentication guide: + https://github.com/googleapis/google-auth-library-nodejs" + required: true + - label: "Check out handwritten samples for many of our APIs: + https://github.com/GoogleCloudPlatform/nodejs-docs-samples" + required: true + - type: textarea + attributes: + label: > + A screenshot that you have tested with "Try this API". + description: > + As our client libraries are mostly autogenerated, we kindly request + that you test whether your issue is with the client library, or with the + API itself. To do so, please search for your API + here: https://developers.google.com/apis-explorer and attempt to + reproduce the issue in the given method. Please include a screenshot of + the response in "Try this API". This response should NOT match the current + behavior you are experiencing. If the behavior is the same, it means + that you are likely experiencing a bug with the API itself. In that + case, please submit an issue to the API team, either by submitting an + issue in its issue tracker (https://cloud.google.com/support/docs/issue-trackers), or by + submitting an issue in its linked tracker in the .repo-metadata.json + file https://issuetracker.google.com/savedsearches/5116474 + validations: + required: true + - type: input + attributes: + label: > + Link to the code that reproduces this issue. A link to a **public** Github Repository or gist with a minimal + reproduction. + description: > + **Skipping this or providing an invalid link will result in the issue being closed** + validations: + required: true + - type: textarea + attributes: + label: > + A step-by-step description of how to reproduce the issue, based on + the linked reproduction. + description: > + Screenshots can be provided in the issue body below. + placeholder: | + 1. Start the application in development (next dev) + 2. Click X + 3. 
Y will happen + validations: + required: true + - type: textarea + attributes: + label: A clear and concise description of what the bug is, and what you + expected to happen. + placeholder: Following the steps from the previous section, I expected A to + happen, but I observed B instead + validations: + required: true + + - type: textarea + attributes: + label: A clear and concise description WHY you expect this behavior, i.e., was it a recent change, there is documentation that points to this behavior, etc. ** + placeholder: 'Documentation here(link) states that B should happen instead of A' + validations: + required: true diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/config.yml b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000000..603b90133b6 --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,4 @@ +contact_links: + - name: Google Cloud Support + url: https://cloud.google.com/support/ + about: If you have a support contract with Google, please use the Google Cloud Support portal. diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/documentation_request.yml b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/documentation_request.yml new file mode 100644 index 00000000000..7df17fcfaed --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/documentation_request.yml @@ -0,0 +1,53 @@ +name: Documentation Requests +description: Requests for more information +body: + - type: markdown + attributes: + value: > + Please use this issue type to log documentation requests against the library itself. + These requests should involve documentation on Github (`.md` files), and should relate to the library + itself. If you have questions or documentation requests for an API, please + reach out to the API tracker itself. + + Please submit an issue to the API team, either by submitting an + issue in its issue tracker https://cloud.google.com/support/docs/issue-trackers), or by + submitting an issue in its linked tracker in the .repo-metadata.json + file in the API under packages/* ([example](https://issuetracker.google.com/savedsearches/5116474)). + You can also submit a request to documentation on cloud.google.com itself with the "Send Feedback" + on the bottom of the page. + + + Please note that documentation requests and questions for specific APIs + will be closed. + - type: checkboxes + attributes: + label: Please make sure you have searched for information in the following + guides. 
+ options: + - label: "Search the issues already opened: + https://github.com/GoogleCloudPlatform/google-cloud-node/issues" + required: true + - label: "Check our Troubleshooting guide: + https://googlecloudplatform.github.io/google-cloud-node/#/docs/guid\ + es/troubleshooting" + required: true + - label: "Check our FAQ: + https://googlecloudplatform.github.io/google-cloud-node/#/docs/guid\ + es/faq" + required: true + - label: "Check our libraries HOW-TO: + https://github.com/googleapis/gax-nodejs/blob/main/client-libraries\ + .md" + required: true + - label: "Check out our authentication guide: + https://github.com/googleapis/google-auth-library-nodejs" + required: true + - label: "Check out handwritten samples for many of our APIs: + https://github.com/GoogleCloudPlatform/nodejs-docs-samples" + required: true + - type: textarea + attributes: + label: > + Documentation Request + validations: + required: true diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/feature_request.yml b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000000..2c4ba2eb366 --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,53 @@ +name: Feature Request +description: Suggest an idea for this library +labels: + - feature request +body: + - type: markdown + attributes: + value: > + **PLEASE READ**: If you have a support contract with Google, please + create an issue in the [support + console](https://cloud.google.com/support/) instead of filing on GitHub. + This will ensure a timely response. Otherwise, please make sure to + follow the steps below. + - type: textarea + attributes: + label: > + A screenshot that you have tested with "Try this API". + description: > + As our client libraries are mostly autogenerated, we kindly request + that you test whether your feature request is with the client library, or with the + API itself. To do so, please search for your API + here: https://developers.google.com/apis-explorer and attempt to + reproduce the issue in the given method. Please include a screenshot of + the response in "Try this API". This response should NOT match the current + behavior you are experiencing. If the behavior is the same, it means + that you are likely requesting a feature for the API itself. In that + case, please submit an issue to the API team, either by submitting an + issue in its issue tracker https://cloud.google.com/support/docs/issue-trackers, or by + submitting an issue in its linked tracker in the .repo-metadata.json + file in the API under packages/* ([example](https://issuetracker.google.com/savedsearches/5116474)) + + Example of library specific issues would be: retry strategies, authentication questions, or issues with typings. + Examples of API issues would include: expanding method parameter types, adding functionality to an API. + validations: + required: true + - type: textarea + attributes: + label: > + What would you like to see in the library? + description: > + Screenshots can be provided in the issue body below. + placeholder: | + 1. Set up authentication like so + 2. Run the program like so + 3. 
X would be nice to happen + + - type: textarea + attributes: + label: Describe alternatives you've considered + + - type: textarea + attributes: + label: Additional context/notes \ No newline at end of file diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/processs_request.md b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/processs_request.md new file mode 100644 index 00000000000..45682e8f117 --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/processs_request.md @@ -0,0 +1,4 @@ +--- +name: Process Request +about: Submit a process request to the library. Process requests are any requests related to library infrastructure, for example CI/CD, publishing, releasing, broken links. +--- diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/questions.md b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/questions.md new file mode 100644 index 00000000000..62c1dd1b93a --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/questions.md @@ -0,0 +1,8 @@ +--- +name: Question +about: If you have a question, please use Discussions + +--- + +If you have a general question that goes beyond the library itself, we encourage you to use [Discussions](https://github.com//discussions) +to engage with fellow community members! diff --git a/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/support_request.md b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/support_request.md new file mode 100644 index 00000000000..99586903212 --- /dev/null +++ b/handwritten/cloud-profiler/.github/ISSUE_TEMPLATE/support_request.md @@ -0,0 +1,7 @@ +--- +name: Support request +about: If you have a support contract with Google, please create an issue in the Google Cloud Support console. + +--- + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. diff --git a/handwritten/cloud-profiler/.github/PULL_REQUEST_TEMPLATE.md b/handwritten/cloud-profiler/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..6c7e5597bac --- /dev/null +++ b/handwritten/cloud-profiler/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: +- [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/cloud-profiler-nodejs/issues/new/choose) before writing your code! 
That way we can discuss the change, evaluate designs, and agree on the general idea +- [ ] Ensure the tests and linter pass +- [ ] Code coverage does not decrease (if any source code was changed) +- [ ] Appropriate docs were updated (if necessary) + +Fixes # 🦕 diff --git a/handwritten/cloud-profiler/.github/auto-approve.yml b/handwritten/cloud-profiler/.github/auto-approve.yml new file mode 100644 index 00000000000..7cba0af636c --- /dev/null +++ b/handwritten/cloud-profiler/.github/auto-approve.yml @@ -0,0 +1,2 @@ +processes: + - "NodeDependency" \ No newline at end of file diff --git a/handwritten/cloud-profiler/.github/auto-label.yaml b/handwritten/cloud-profiler/.github/auto-label.yaml new file mode 100644 index 00000000000..09c8d735b45 --- /dev/null +++ b/handwritten/cloud-profiler/.github/auto-label.yaml @@ -0,0 +1,2 @@ +requestsize: + enabled: true diff --git a/handwritten/cloud-profiler/.github/generated-files-bot.yml b/handwritten/cloud-profiler/.github/generated-files-bot.yml new file mode 100644 index 00000000000..992ccef4a13 --- /dev/null +++ b/handwritten/cloud-profiler/.github/generated-files-bot.yml @@ -0,0 +1,16 @@ +generatedFiles: +- path: '.kokoro/**' + message: '`.kokoro` files are templated and should be updated in [`synthtool`](https://github.com/googleapis/synthtool)' +- path: '.github/CODEOWNERS' + message: 'CODEOWNERS should instead be modified via the `codeowner_team` property in .repo-metadata.json' +- path: '.github/workflows/ci.yaml' + message: '`.github/workflows/ci.yaml` (GitHub Actions) should be updated in [`synthtool`](https://github.com/googleapis/synthtool)' +- path: '.github/generated-files-bot.+(yml|yaml)' + message: '`.github/generated-files-bot.(yml|yaml)` should be updated in [`synthtool`](https://github.com/googleapis/synthtool)' +- path: 'README.md' + message: '`README.md` is managed by [`synthtool`](https://github.com/googleapis/synthtool). However, a partials file can be used to update the README, e.g.: https://github.com/googleapis/nodejs-storage/blob/main/.readme-partials.yaml' +- path: 'samples/README.md' + message: '`samples/README.md` is managed by [`synthtool`](https://github.com/googleapis/synthtool). However, a partials file can be used to update the README, e.g.: https://github.com/googleapis/nodejs-storage/blob/main/.readme-partials.yaml' +ignoreAuthors: +- 'gcf-owl-bot[bot]' +- 'yoshi-automation' diff --git a/handwritten/cloud-profiler/.github/release-please.yml b/handwritten/cloud-profiler/.github/release-please.yml new file mode 100644 index 00000000000..a1b41da3cb3 --- /dev/null +++ b/handwritten/cloud-profiler/.github/release-please.yml @@ -0,0 +1,2 @@ +handleGHRelease: true +releaseType: node diff --git a/handwritten/cloud-profiler/.github/release-trigger.yml b/handwritten/cloud-profiler/.github/release-trigger.yml new file mode 100644 index 00000000000..d4ca94189e1 --- /dev/null +++ b/handwritten/cloud-profiler/.github/release-trigger.yml @@ -0,0 +1 @@ +enabled: true diff --git a/handwritten/cloud-profiler/.github/scripts/close-invalid-link.cjs b/handwritten/cloud-profiler/.github/scripts/close-invalid-link.cjs new file mode 100644 index 00000000000..d7a3688e755 --- /dev/null +++ b/handwritten/cloud-profiler/.github/scripts/close-invalid-link.cjs @@ -0,0 +1,56 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +async function closeIssue(github, owner, repo, number) { + await github.rest.issues.createComment({ + owner: owner, + repo: repo, + issue_number: number, + body: 'Issue was opened with an invalid reproduction link. Please make sure the repository is a valid, publicly-accessible github repository, and make sure the url is complete (example: https://github.com/googleapis/google-cloud-node)' + }); + await github.rest.issues.update({ + owner: owner, + repo: repo, + issue_number: number, + state: 'closed' + }); +} +module.exports = async ({github, context}) => { + const owner = context.repo.owner; + const repo = context.repo.repo; + const number = context.issue.number; + + const issue = await github.rest.issues.get({ + owner: owner, + repo: repo, + issue_number: number, + }); + + const isBugTemplate = issue.data.body.includes('Link to the code that reproduces this issue'); + + if (isBugTemplate) { + console.log(`Issue ${number} is a bug template`) + try { + const link = issue.data.body.split('\n')[18].match(/(https?:\/\/(gist\.)?github.com\/.*)/)[0]; + console.log(`Issue ${number} contains this link: ${link}`) + const isValidLink = (await fetch(link)).ok; + console.log(`Issue ${number} has a ${isValidLink ? 'valid' : 'invalid'} link`) + if (!isValidLink) { + await closeIssue(github, owner, repo, number); + } + } catch (err) { + await closeIssue(github, owner, repo, number); + } + } +}; diff --git a/handwritten/cloud-profiler/.github/scripts/close-unresponsive.cjs b/handwritten/cloud-profiler/.github/scripts/close-unresponsive.cjs new file mode 100644 index 00000000000..142dc1265a4 --- /dev/null +++ b/handwritten/cloud-profiler/.github/scripts/close-unresponsive.cjs @@ -0,0 +1,69 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +function labeledEvent(data) { + return data.event === 'labeled' && data.label.name === 'needs more info'; + } + + const numberOfDaysLimit = 15; + const close_message = `This has been closed since a request for information has \ + not been answered for ${numberOfDaysLimit} days. 
It can be reopened when the \ + requested information is provided.`; + + module.exports = async ({github, context}) => { + const owner = context.repo.owner; + const repo = context.repo.repo; + + const issues = await github.rest.issues.listForRepo({ + owner: owner, + repo: repo, + labels: 'needs more info', + }); + const numbers = issues.data.map((e) => e.number); + + for (const number of numbers) { + const events = await github.paginate( + github.rest.issues.listEventsForTimeline, + { + owner: owner, + repo: repo, + issue_number: number, + }, + (response) => response.data.filter(labeledEvent) + ); + + const latest_response_label = events[events.length - 1]; + + const created_at = new Date(latest_response_label.created_at); + const now = new Date(); + const diff = now - created_at; + const diffDays = diff / (1000 * 60 * 60 * 24); + + if (diffDays > numberOfDaysLimit) { + await github.rest.issues.update({ + owner: owner, + repo: repo, + issue_number: number, + state: 'closed', + }); + + await github.rest.issues.createComment({ + owner: owner, + repo: repo, + issue_number: number, + body: close_message, + }); + } + } + }; diff --git a/handwritten/cloud-profiler/.github/scripts/remove-response-label.cjs b/handwritten/cloud-profiler/.github/scripts/remove-response-label.cjs new file mode 100644 index 00000000000..887cf349e9d --- /dev/null +++ b/handwritten/cloud-profiler/.github/scripts/remove-response-label.cjs @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +module.exports = async ({ github, context }) => { + const commenter = context.actor; + const issue = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + }); + const author = issue.data.user.login; + const labels = issue.data.labels.map((e) => e.name); + + if (author === commenter && labels.includes('needs more info')) { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + name: 'needs more info', + }); + } + }; diff --git a/handwritten/cloud-profiler/.github/sync-repo-settings.yaml b/handwritten/cloud-profiler/.github/sync-repo-settings.yaml new file mode 100644 index 00000000000..accde8ce253 --- /dev/null +++ b/handwritten/cloud-profiler/.github/sync-repo-settings.yaml @@ -0,0 +1,40 @@ +# Whether or not rebase-merging is enabled on this repository. +# Defaults to `true` +rebaseMergeAllowed: false + +# Whether or not squash-merging is enabled on this repository. +# Defaults to `true` +squashMergeAllowed: true + +# Whether or not PRs are merged with a merge commit on this repository. +# Defaults to `false` +mergeCommitAllowed: false + +# Rules for main branch protection +branchProtectionRules: +# Identifies the protection rule pattern. Name of the branch to be protected. +# Defaults to `main` +- pattern: main + # Can admins overwrite branch protection. 
+ # Defaults to `true` + isAdminEnforced: true + # Number of approving reviews required to update matching branches. + # Defaults to `1` + requiredApprovingReviewCount: 1 + # Are reviews from code owners required to update matching branches. + # Defaults to `false` + requiresCodeOwnerReviews: true + # Require up to date branches + requiresStrictStatusChecks: true + # List of required status check contexts that must pass for commits to be accepted to matching branches. + requiredStatusCheckContexts: + - "Kokoro integration test" + - "ci/kokoro: Samples test" + - "ci/kokoro: System test" + - "cla/google" + - "docs" + - "lint" + - "test (14)" + - "test (16)" + - "test (18)" + - "system-test" diff --git a/handwritten/cloud-profiler/.github/workflows/ci.yaml b/handwritten/cloud-profiler/.github/workflows/ci.yaml new file mode 100644 index 00000000000..9e1ea26bef0 --- /dev/null +++ b/handwritten/cloud-profiler/.github/workflows/ci.yaml @@ -0,0 +1,56 @@ +# This file is manually updated due to specific needs of the cloud +# profiler agent, e.g., not supporting Windows. +# +# It should be in sync with Google's template +# https://github.com/googleapis/synthtool/blob/master/synthtool/gcp/templates/node_library/.github/workflows/ci.yaml +# for Node.js GCP libraries as much as possible. + +on: + push: + branches: + - main + pull_request: +name: ci +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + node: [14, 16, 18, 20] + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node }} + - run: node --version + # The first installation step ensures that all of our production + # dependencies work on the given Node.js version, this helps us find + # dependencies that don't match our engines field: + - run: npm install --production --engine-strict --ignore-scripts --no-package-lock + # Clean up the production install, before installing dev/production: + - run: rm -rf node_modules + - run: npm install + - run: npm test + env: + MOCHA_THROW_DEPRECATION: false + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/setup-node@v4 + with: + node-version: 14 + - run: npm install + - run: npm run lint + # docs: + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + # - uses: actions/setup-node@v4 + # with: + # node-version: 14 + # - run: npm install + # - run: npm run docs + # - uses: JustinBeckwith/linkinator-action@v1 + # with: + # paths: docs/ diff --git a/handwritten/cloud-profiler/.github/workflows/issues-no-repro.yaml b/handwritten/cloud-profiler/.github/workflows/issues-no-repro.yaml new file mode 100644 index 00000000000..442a46bcc48 --- /dev/null +++ b/handwritten/cloud-profiler/.github/workflows/issues-no-repro.yaml @@ -0,0 +1,18 @@ +name: invalid_link +on: + issues: + types: [opened, reopened] + +jobs: + close: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + - uses: actions/github-script@v7 + with: + script: | + const script = require('./.github/scripts/close-invalid-link.cjs') + await script({github, context}) diff --git a/handwritten/cloud-profiler/.github/workflows/response.yaml b/handwritten/cloud-profiler/.github/workflows/response.yaml new file mode 100644 index 00000000000..6ed37326fea --- /dev/null +++ b/handwritten/cloud-profiler/.github/workflows/response.yaml @@ -0,0 +1,35 @@ +name: 
no_response +on: + schedule: + - cron: '30 1 * * *' # Run every day at 01:30 + workflow_dispatch: + issue_comment: + +jobs: + close: + if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + - uses: actions/github-script@v7 + with: + script: | + const script = require('./.github/scripts/close-unresponsive.cjs') + await script({github, context}) + + remove_label: + if: github.event_name == 'issue_comment' + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + - uses: actions/github-script@v7 + with: + script: | + const script = require('./.github/scripts/remove-response-label.cjs') + await script({github, context}) diff --git a/handwritten/cloud-profiler/.gitignore b/handwritten/cloud-profiler/.gitignore new file mode 100644 index 00000000000..31a3a2b5629 --- /dev/null +++ b/handwritten/cloud-profiler/.gitignore @@ -0,0 +1,8 @@ +.nyc_output +.vscode +build +node_modules +package-lock.json +.coverage +docs +__pycache__ diff --git a/handwritten/cloud-profiler/.jsdoc.js b/handwritten/cloud-profiler/.jsdoc.js new file mode 100644 index 00000000000..2913d85f3eb --- /dev/null +++ b/handwritten/cloud-profiler/.jsdoc.js @@ -0,0 +1,51 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2019 Google, LLC.', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/profiler', + theme: 'lumen', + default: { + "outputSourceFiles": false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/handwritten/cloud-profiler/.kokoro/.gitattributes b/handwritten/cloud-profiler/.kokoro/.gitattributes new file mode 100644 index 00000000000..87acd4f484e --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/.gitattributes @@ -0,0 +1 @@ +* linguist-generated=true diff --git a/handwritten/cloud-profiler/.kokoro/README.md b/handwritten/cloud-profiler/.kokoro/README.md new file mode 100644 index 00000000000..754496e4d2d --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/README.md @@ -0,0 +1,5 @@ +This directory contains kokoro configurations and associated scripts for tests +and workflows, like unit tests, which are replicated in other Node.JS +repositories. + +To simplify maintenance these files are kept together in this directory. 
\ No newline at end of file diff --git a/handwritten/cloud-profiler/.kokoro/common.cfg b/handwritten/cloud-profiler/.kokoro/common.cfg new file mode 100644 index 00000000000..a45467d0836 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:14-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/test.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/continuous/go111/golint.cfg b/handwritten/cloud-profiler/.kokoro/continuous/go111/golint.cfg new file mode 100644 index 00000000000..b2b414e920d --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/continuous/go111/golint.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/go111" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/golint.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/continuous/node14/common.cfg b/handwritten/cloud-profiler/.kokoro/continuous/node14/common.cfg new file mode 100644 index 00000000000..a45467d0836 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/continuous/node14/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:14-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/test.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/continuous/node14/lint.cfg b/handwritten/cloud-profiler/.kokoro/continuous/node14/lint.cfg new file mode 100644 index 00000000000..3fc91307dfd --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/continuous/node14/lint.cfg @@ -0,0 +1,4 @@ +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/lint.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/continuous/node14/samples-test.cfg b/handwritten/cloud-profiler/.kokoro/continuous/node14/samples-test.cfg new file mode 100644 index 00000000000..44cf28e51e2 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/continuous/node14/samples-test.cfg @@ -0,0 +1,12 @@ +# Download resources for system tests (service account key, etc.) 
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/samples-test.sh" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "long-door-651-kokoro-system-test-service-account" +} \ No newline at end of file diff --git a/handwritten/cloud-profiler/.kokoro/continuous/node14/system-test.cfg b/handwritten/cloud-profiler/.kokoro/continuous/node14/system-test.cfg new file mode 100644 index 00000000000..8b6ae10f93f --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/continuous/node14/system-test.cfg @@ -0,0 +1,12 @@ +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/system-test.sh" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "long-door-651-kokoro-system-test-service-account" +} \ No newline at end of file diff --git a/handwritten/cloud-profiler/.kokoro/continuous/node14/test.cfg b/handwritten/cloud-profiler/.kokoro/continuous/node14/test.cfg new file mode 100644 index 00000000000..e69de29bb2d diff --git a/handwritten/cloud-profiler/.kokoro/docs.sh b/handwritten/cloud-profiler/.kokoro/docs.sh new file mode 100755 index 00000000000..85901242b5e --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/docs.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +cd $(dirname $0)/.. + +npm install + +npm run docs-test diff --git a/handwritten/cloud-profiler/.kokoro/golint.sh b/handwritten/cloud-profiler/.kokoro/golint.sh new file mode 100755 index 00000000000..fb25f3dec74 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/golint.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=/home/node/.npm-global + +cd $(dirname $0)/.. + +echo "Check that code is formatted" + +gofmtdiff=$(gofmt -s -d .) && if [ -n "$gofmtdiff" ]; then printf 'gofmt -s found:\n%s\n' "$gofmtdiff" && exit 1; fi + +echo "Install golint" + +go get -u golang.org/x/lint/golint + +echo "Run lint check" + +golintlint=$(golint ./...) 
&& if [ -n "$golintlint" ]; then printf 'golint found:\n%s\n' "$golintlint" && exit 1; fi diff --git a/handwritten/cloud-profiler/.kokoro/lint.sh b/handwritten/cloud-profiler/.kokoro/lint.sh new file mode 100755 index 00000000000..aef4866e4c4 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/lint.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +cd $(dirname $0)/.. + +npm install + +# Install and link samples +if [ -f samples/package.json ]; then + cd samples/ + npm link ../ + npm install + cd .. +fi + +npm run lint diff --git a/handwritten/cloud-profiler/.kokoro/populate-secrets.sh b/handwritten/cloud-profiler/.kokoro/populate-secrets.sh new file mode 100755 index 00000000000..deb2b199eb4 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/populate-secrets.sh @@ -0,0 +1,76 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is called in the early stage of `trampoline_v2.sh` to +# populate secrets needed for the CI builds. + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + +# Populates requested secrets set in SECRET_MANAGER_KEYS + +# In Kokoro CI builds, we use the service account attached to the +# Kokoro VM. This means we need to setup auth on other CI systems. +# For local run, we just use the gcloud command for retrieving the +# secrets. + +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + GCLOUD_COMMANDS=( + "docker" + "run" + "--entrypoint=gcloud" + "--volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR}" + "gcr.io/google.com/cloudsdktool/cloud-sdk" + ) + if [[ "${TRAMPOLINE_CI:-}" == "kokoro" ]]; then + SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" + else + echo "Authentication for this CI system is not implemented yet." + exit 2 + # TODO: Determine appropriate SECRET_LOCATION and the GCLOUD_COMMANDS. + fi +else + # For local run, use /dev/shm or temporary directory for + # KOKORO_GFILE_DIR. 
+ if [[ -d "/dev/shm" ]]; then + export KOKORO_GFILE_DIR=/dev/shm + else + export KOKORO_GFILE_DIR=$(mktemp -d -t ci-XXXXXXXX) + fi + SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" + GCLOUD_COMMANDS=("gcloud") +fi + +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} + +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + "${GCLOUD_COMMANDS[@]}" \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret $key > \ + "$SECRET_LOCATION/$key" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + exit 2 + fi +done diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/go111/golint.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/go111/golint.cfg new file mode 100644 index 00000000000..b2b414e920d --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/go111/golint.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/go111" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/golint.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/node13/common.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/node13/common.cfg new file mode 100644 index 00000000000..ac38556d312 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/node13/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:13-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/test.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/node13/test.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/node13/test.cfg new file mode 100644 index 00000000000..e69de29bb2d diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/node14/common.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/node14/common.cfg new file mode 100644 index 00000000000..a45467d0836 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/node14/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:14-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/test.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/node14/samples-test.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/node14/samples-test.cfg new file mode 100644 index 00000000000..44cf28e51e2 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/node14/samples-test.cfg @@ -0,0 +1,12 @@ +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/samples-test.sh" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "long-door-651-kokoro-system-test-service-account" +} \ No newline at end of file diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/node14/system-test.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/node14/system-test.cfg new file mode 100644 index 00000000000..8b6ae10f93f --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/node14/system-test.cfg @@ -0,0 +1,12 @@ +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/system-test.sh" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "long-door-651-kokoro-system-test-service-account" +} \ No newline at end of file diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/node14/test.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/node14/test.cfg new file mode 100644 index 00000000000..e69de29bb2d diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/windows/common.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/windows/common.cfg new file mode 100644 index 00000000000..d6e25e0b1b8 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/windows/common.cfg @@ -0,0 +1,2 @@ +# Format: //devtools/kokoro/config/proto/build.proto + diff --git a/handwritten/cloud-profiler/.kokoro/presubmit/windows/test.cfg b/handwritten/cloud-profiler/.kokoro/presubmit/windows/test.cfg new file mode 100644 index 00000000000..218265e0df5 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/presubmit/windows/test.cfg @@ -0,0 +1,2 @@ +# Use the test file directly +build_file: "handwritten/cloud-profiler/.kokoro/test.bat" diff --git a/handwritten/cloud-profiler/.kokoro/publish.sh b/handwritten/cloud-profiler/.kokoro/publish.sh new file mode 100755 index 00000000000..ca1d47af347 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/publish.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +# Start the releasetool reporter +python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script + +cd $(dirname $0)/.. + +NPM_TOKEN=$(cat $KOKORO_KEYSTORE_DIR/73713_google-cloud-npm-token-1) +echo "//wombat-dressing-room.appspot.com/:_authToken=${NPM_TOKEN}" > ~/.npmrc + +npm install +npm pack . +# npm provides no way to specify, observe, or predict the name of the tarball +# file it generates. We have to look in the current directory for the freshest +# .tgz file. +TARBALL=$(ls -1 -t *.tgz | head -1) + +npm publish --access=public --registry=https://wombat-dressing-room.appspot.com "$TARBALL" + +# Kokoro collects *.tgz and package-lock.json files and stores them in Placer +# so we can generate SBOMs and attestations. +# However, we *don't* want Kokoro to collect package-lock.json and *.tgz files +# that happened to be installed with dependencies. +find node_modules -name package-lock.json -o -name "*.tgz" | xargs rm -f \ No newline at end of file diff --git a/handwritten/cloud-profiler/.kokoro/release/docs-devsite.cfg b/handwritten/cloud-profiler/.kokoro/release/docs-devsite.cfg new file mode 100644 index 00000000000..052baa782cb --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/release/docs-devsite.cfg @@ -0,0 +1,26 @@ +# service account used to publish up-to-date docs. +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} + +# doc publications use a Python image. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:14-user" +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline_v2.sh" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/release/docs-devsite.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/release/docs-devsite.sh b/handwritten/cloud-profiler/.kokoro/release/docs-devsite.sh new file mode 100755 index 00000000000..81a89f6c172 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/release/docs-devsite.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +if [[ -z "$CREDENTIALS" ]]; then + # if CREDENTIALS are explicitly set, assume we're testing locally + # and don't set NPM_CONFIG_PREFIX. + export NPM_CONFIG_PREFIX=${HOME}/.npm-global + export PATH="$PATH:${NPM_CONFIG_PREFIX}/bin" + cd $(dirname $0)/../.. +fi + +npm install +npm install --no-save @google-cloud/cloud-rad@^0.4.0 +# publish docs to devsite +npx @google-cloud/cloud-rad . 
cloud-rad diff --git a/handwritten/cloud-profiler/.kokoro/release/docs.cfg b/handwritten/cloud-profiler/.kokoro/release/docs.cfg new file mode 100644 index 00000000000..82cd2b8faa0 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/release/docs.cfg @@ -0,0 +1,26 @@ +# service account used to publish up-to-date docs. +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} + +# doc publications use a Python image. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:14-user" +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline_v2.sh" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/release/docs.sh" +} diff --git a/handwritten/cloud-profiler/.kokoro/release/docs.sh b/handwritten/cloud-profiler/.kokoro/release/docs.sh new file mode 100755 index 00000000000..1d8f3f490a5 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/release/docs.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# build jsdocs (Python is installed on the Node 10 docker image). +if [[ -z "$CREDENTIALS" ]]; then + # if CREDENTIALS are explicitly set, assume we're testing locally + # and don't set NPM_CONFIG_PREFIX. + export NPM_CONFIG_PREFIX=${HOME}/.npm-global + export PATH="$PATH:${NPM_CONFIG_PREFIX}/bin" + cd $(dirname $0)/../.. +fi +npm install +npm run docs + +# create docs.metadata, based on package.json and .repo-metadata.json. +npm i json@9.0.6 -g +python3 -m docuploader create-metadata \ + --name=$(cat .repo-metadata.json | json name) \ + --version=$(cat package.json | json version) \ + --language=$(cat .repo-metadata.json | json language) \ + --distribution-name=$(cat .repo-metadata.json | json distribution_name) \ + --product-page=$(cat .repo-metadata.json | json product_documentation) \ + --github-repository=$(cat .repo-metadata.json | json repo) \ + --issue-tracker=$(cat .repo-metadata.json | json issue_tracker) +cp docs.metadata ./docs/docs.metadata + +# deploy the docs. 
+if [[ -z "$CREDENTIALS" ]]; then + CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account +fi +if [[ -z "$BUCKET" ]]; then + BUCKET=docs-staging +fi +python3 -m docuploader upload ./docs --credentials $CREDENTIALS --staging-bucket $BUCKET diff --git a/handwritten/cloud-profiler/.kokoro/release/publish.cfg b/handwritten/cloud-profiler/.kokoro/release/publish.cfg new file mode 100644 index 00000000000..18fff4094cb --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/release/publish.cfg @@ -0,0 +1,51 @@ +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google-cloud-npm-token-1" + } + } +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "handwritten/cloud-profiler/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:14-user" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/handwritten/cloud-profiler/.kokoro/publish.sh" +} + +# Store the packages we uploaded to npmjs.org and their corresponding +# package-lock.jsons in Placer. That way, we have a record of exactly +# what we published, and which version of which tools we used to publish +# it, which we can use to generate SBOMs and attestations. +action { + define_artifacts { + regex: "github/**/*.tgz" + regex: "github/**/package-lock.json" + strip_prefix: "github" + } +} diff --git a/handwritten/cloud-profiler/.kokoro/samples-test.sh b/handwritten/cloud-profiler/.kokoro/samples-test.sh new file mode 100755 index 00000000000..8c5d108cb58 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/samples-test.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/secret_manager/long-door-651-kokoro-system-test-service-account +export GCLOUD_PROJECT=long-door-651 + +cd $(dirname $0)/.. + +# Run a pre-test hook, if a pre-samples-test.sh is in the project +if [ -f .kokoro/pre-samples-test.sh ]; then + set +x + . .kokoro/pre-samples-test.sh + set -x +fi + +if [ -f samples/package.json ]; then + npm install + + # Install and link samples + cd samples/ + npm link ../ + npm install + cd .. 
+ # If tests are running against main branch, configure flakybot + # to open issues on failures: + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then + export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml + export MOCHA_REPORTER=xunit + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP + fi + + npm run samples-test +fi + +# codecov combines coverage across integration and unit tests. Include +# the logic below for any environment you wish to collect coverage for: +COVERAGE_NODE=14 +if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then + NYC_BIN=./node_modules/nyc/bin/nyc.js + if [ -f "$NYC_BIN" ]; then + $NYC_BIN report || true + fi + bash $KOKORO_GFILE_DIR/codecov.sh +else + echo "coverage is only reported for Node $COVERAGE_NODE" +fi diff --git a/handwritten/cloud-profiler/.kokoro/system-test.sh b/handwritten/cloud-profiler/.kokoro/system-test.sh new file mode 100755 index 00000000000..0b3043d268c --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/system-test.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/secret_manager/long-door-651-kokoro-system-test-service-account +export GCLOUD_PROJECT=long-door-651 + +cd $(dirname $0)/.. + +# Run a pre-test hook, if a pre-system-test.sh is in the project +if [ -f .kokoro/pre-system-test.sh ]; then + set +x + . .kokoro/pre-system-test.sh + set -x +fi + +npm install + +# If tests are running against main branch, configure flakybot +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then + export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml + export MOCHA_REPORTER=xunit + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi + +npm run system-test + +# codecov combines coverage across integration and unit tests. Include +# the logic below for any environment you wish to collect coverage for: +COVERAGE_NODE=14 +if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then + NYC_BIN=./node_modules/nyc/bin/nyc.js + if [ -f "$NYC_BIN" ]; then + $NYC_BIN report || true + fi + bash $KOKORO_GFILE_DIR/codecov.sh +else + echo "coverage is only reported for Node $COVERAGE_NODE" +fi diff --git a/handwritten/cloud-profiler/.kokoro/test.bat b/handwritten/cloud-profiler/.kokoro/test.bat new file mode 100644 index 00000000000..0bb12405231 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/test.bat @@ -0,0 +1,33 @@ +@rem Copyright 2018 Google LLC. All rights reserved. 
+@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + +@echo "Starting Windows build" + +cd /d %~dp0 +cd .. + +@rem npm path is not currently set in our image, we should fix this next time +@rem we upgrade Node.js in the image: +SET PATH=%PATH%;/cygdrive/c/Program Files/nodejs/npm + +call nvm use v14.17.3 +call which node + +call npm install || goto :error +call npm run test || goto :error + +goto :EOF + +:error +exit /b 1 diff --git a/handwritten/cloud-profiler/.kokoro/test.sh b/handwritten/cloud-profiler/.kokoro/test.sh new file mode 100755 index 00000000000..862d478d324 --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/test.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +cd $(dirname $0)/.. + +npm install +# If tests are running against main branch, configure flakybot +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then + export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml + export MOCHA_REPORTER=xunit + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi +# Unit tests exercise the entire API surface, which may include +# deprecation warnings: +export MOCHA_THROW_DEPRECATION=false +npm test + +# codecov combines coverage across integration and unit tests. Include +# the logic below for any environment you wish to collect coverage for: +COVERAGE_NODE=14 +if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then + NYC_BIN=./node_modules/nyc/bin/nyc.js + if [ -f "$NYC_BIN" ]; then + $NYC_BIN report || true + fi + bash $KOKORO_GFILE_DIR/codecov.sh +else + echo "coverage is only reported for Node $COVERAGE_NODE" +fi diff --git a/handwritten/cloud-profiler/.kokoro/trampoline.sh b/handwritten/cloud-profiler/.kokoro/trampoline.sh new file mode 100755 index 00000000000..f693a1ce7aa --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/trampoline.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is not used any more, but we keep this file for making it +# easy to roll back. +# TODO: Remove this file from the template. + +set -eo pipefail + +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT + +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" diff --git a/handwritten/cloud-profiler/.kokoro/trampoline_v2.sh b/handwritten/cloud-profiler/.kokoro/trampoline_v2.sh new file mode 100755 index 00000000000..4d03112128a --- /dev/null +++ b/handwritten/cloud-profiler/.kokoro/trampoline_v2.sh @@ -0,0 +1,490 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# If you want to make a change to this file, consider doing so at: +# https://github.com/googlecloudplatform/docker-ci-helper +# +# This script is for running CI builds. For Kokoro builds, we +# set this script to `build_file` field in the Kokoro configuration. + +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. +# +# Here is an example for running this script. 
+# TRAMPOLINE_IMAGE=gcr.io/cloud-devrel-kokoro-resources/node:10-user \ +# TRAMPOLINE_BUILD_FILE=.kokoro/system-test.sh \ +# .kokoro/trampoline_v2.sh + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.7" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. +function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" + # Contains path to build artifacts being executed. + "KOKORO_BUILD_ARTIFACTS_SUBDIR" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. 
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. + "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For flakybot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. 
+ "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. "github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}" | grep "${TRAMPOLINE_IMAGE%:*}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. 
We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. + "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/handwritten/cloud-profiler/.mocharc.js b/handwritten/cloud-profiler/.mocharc.js new file mode 100644 index 00000000000..0b600509bed --- /dev/null +++ b/handwritten/cloud-profiler/.mocharc.js @@ -0,0 +1,29 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000, + "recursive": true +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/handwritten/cloud-profiler/.nycrc b/handwritten/cloud-profiler/.nycrc new file mode 100644 index 00000000000..b18d5472b62 --- /dev/null +++ b/handwritten/cloud-profiler/.nycrc @@ -0,0 +1,24 @@ +{ + "report-dir": "./.coverage", + "reporter": ["text", "lcov"], + "exclude": [ + "**/*-test", + "**/.coverage", + "**/apis", + "**/benchmark", + "**/conformance", + "**/docs", + "**/samples", + "**/scripts", + "**/protos", + "**/test", + "**/*.d.ts", + ".jsdoc.js", + "**/.jsdoc.js", + "karma.conf.js", + "webpack-tests.config.js", + "webpack.config.js" + ], + "exclude-after-remap": false, + "all": true +} diff --git a/handwritten/cloud-profiler/.prettierignore b/handwritten/cloud-profiler/.prettierignore new file mode 100644 index 00000000000..9340ad9b86d --- /dev/null +++ b/handwritten/cloud-profiler/.prettierignore @@ -0,0 +1,6 @@ +**/node_modules +**/coverage +test/fixtures +build/ +docs/ +protos/ diff --git a/handwritten/cloud-profiler/.prettierrc.js b/handwritten/cloud-profiler/.prettierrc.js new file mode 100644 index 00000000000..d1b95106f4c --- /dev/null +++ b/handwritten/cloud-profiler/.prettierrc.js @@ -0,0 +1,17 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/handwritten/cloud-profiler/.readme-partials.yaml b/handwritten/cloud-profiler/.readme-partials.yaml new file mode 100644 index 00000000000..349e094af63 --- /dev/null +++ b/handwritten/cloud-profiler/.readme-partials.yaml @@ -0,0 +1,275 @@ +body: |- + + ### Prerequisites + + 1. Your application will need to be using Node.js version between 14 and 20. + + 1. `@google-cloud/profiler` depends on the + [`pprof`](https://www.npmjs.com/package/pprof) module, a module with a native + component that is used to collect profiles with v8's CPU and Heap profilers. + You may need to install additional dependencies to build the `pprof` module. + * For Linux: `pprof` has prebuilt binaries available for Linux and Alpine + Linux for Node 14 and 16. No additional dependencies are required. 
+ * For other environments: when using `@google-cloud/profiler` on environments + that `pprof` does not have prebuilt binaries for, the module + [`node-gyp`](https://www.npmjs.com/package/node-gyp) will be used to + build binaries. See `node-gyp`'s + [documentation](https://github.com/nodejs/node-gyp#installation) + for information on dependencies required to build binaries with `node-gyp`. + + 1. You will need a project in the [Google Developers Console][cloud-console]. + Your application can run anywhere, but the profiler data is associated with a + particular project. + + 1. You will need to enable the Cloud Profiler API for your project. + + ### Basic Set-up + + 1. Install `@google-cloud/profiler` with [`npm`](https://www.npmjs.com) or add + to your [`package.json`](https://docs.npmjs.com/files/package.json#dependencies). + + ```sh + # Install through npm while saving to the local 'package.json' + npm install --save @google-cloud/profiler + ``` + + 2. Include and start the profiler at the beginning of your application: + + ```js + require('@google-cloud/profiler').start().catch((err) => { + console.log(`Failed to start profiler: ${err}`); + }); + ``` + + Some environments require a configuration to be passed to the `start()` + function. For more details on this, see instructions for running + [outside of Google Cloud Platform](#running-elsewhere), on + [App Engine flexible environment](#running-on-app-engine-flexible-environment), + on [Google Compute Engine](#running-on-google-compute-engine), + and on [Google Container Engine](#running-on-google-container-engine). + + 3. If you are running your application locally, or on a machine where you are + using the [Google Cloud SDK][gcloud-sdk], make sure to log in with the + application default credentials: + + ```sh + gcloud beta auth application-default login + ``` + + Alternatively, you can set `GOOGLE_APPLICATION_CREDENTIALS`. For more + details on this, see [Running elsewhere](#running-elsewhere) + + ### Configuration + + See [the default configuration](https://github.com/googleapis/cloud-profiler-nodejs/blob/master/src/config.ts) for a list of possible + configuration options. These options can be passed to the agent through the + object argument to the start command shown below: + + ```js + await require('@google-cloud/profiler').start({disableTime: true}); + ``` + + Alternatively, you can provide the configuration through a config file. This + can be useful if you want to load our module using `--require` on the command + line (which requires and starts the agent) instead of editing your main script. + The `GCLOUD_PROFILER_CONFIG` environment variable should point to your + configuration file. + + ```bash + export GCLOUD_PROFILER_CONFIG=./path/to/your/profiler/configuration.js + ``` + + #### Changing log level + + The profiler writes log statements to the console log for diagnostic purposes. + By default, the log level is set to warn. You can adjust this by setting + `logLevel` in the config. Setting `logLevel` to 0 will disable logging, + 1 sets log level to error, 2 sets it to warn (default), 3 sets it to info, + and 4 sets it to debug. 
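+ + As with the other options, `logLevel` can also be set from the configuration + file referenced by `GCLOUD_PROFILER_CONFIG` described above. A minimal sketch, + assuming the file is a Node module that exports a plain configuration object + (the path and values shown are illustrative): + + ```js + // ./path/to/your/profiler/configuration.js (illustrative path) + // Export the same options that start() accepts. + module.exports = { + logLevel: 3, // info + }; + ```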
+ + So, for example, to start the profiler with the log level at debug, you would + do this: + + ```js + await require('@google-cloud/profiler').start({logLevel: 4}); + ``` + + #### Disabling heap or time profile collection + + By default, the profiler collects both heap profiles, which show memory + allocations, and time profiles, which capture how much wall-clock time is spent + in different locations of the code. Using the configuration, it is possible to + disable the collection of either type of profile. + + To disable time profile collection, set `disableTime` to true: + + ```js + await require('@google-cloud/profiler').start({disableTime: true}); + ``` + + To disable heap profile collection, set `disableHeap` to true: + + ```js + await require('@google-cloud/profiler').start({disableHeap: true}); + ``` + + ### Running on Google Cloud Platform + + There are three different services that can host Node.js applications within + Google Cloud Platform: Google App Engine flexible environment, Google Compute + Engine, and Google Container Engine. After installing `@google-cloud/profiler` + in your project and ensuring that the environment you are using uses a + supported version of Node.js, follow the service-specific instructions to + enable the profiler. + + #### Running on App Engine flexible environment + + To enable the profiling agent for a Node.js program running in the App Engine + flexible environment, import the agent at the top of your application’s main + script or entry point by including the following code snippet: + + ```js + require('@google-cloud/profiler').start(); + ``` + + You can specify which version of Node.js you're using by adding a snippet like + the following to your `package.json`: + + ```json + "engines": { + "node": ">=14.0.0" + } + ``` + The above snippet will ensure that you're using 14.0.0 or greater. + + Deploy your application to App Engine Flexible environment as usual. + + #### Running on Google Compute Engine + + To enable the profiling agent for a Node.js program running in the Google + Compute Engine environment, import the agent at the top of your application’s + main script or entry point by including the following code snippet: + + ```js + require('@google-cloud/profiler').start({ + serviceContext: { + service: 'your-service', + version: '1.0.0' + } + }); + ``` + + #### Running on Google Container Engine + + To enable the profiling agent for a Node.js program running in the Google + Container Engine environment, import the agent at the top of your application’s + main script or entry point by including the following code snippet: + + ```js + require('@google-cloud/profiler').start({ + serviceContext: { + service: 'your-service', + version: '1.0.0' + } + }); + ``` + + #### Running on Istio + + On Istio, the GCP Metadata server may not be available for a few seconds after + your application has started. When this occurs, the profiling agent may fail + to start because it cannot initialize required fields. One can retry when + starting the profiler with the following snippet. + + ```js + const profiler = require('@google-cloud/profiler'); + async function startProfiler() { + for (let i = 0; i < 3; i++) { + try { + await profiler.start({ + serviceContext: { + service: 'your-service', + version: '1.0.0', + }, + }); + } catch(e) { + console.log(`Failed to start profiler: ${e}`); + } + + // Wait for 1 second before trying again. 
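+ // (The three attempts and the fixed one-second delay in this snippet are illustrative; tune them to how long the metadata server takes to become available in your environment.)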
+ await new Promise(r => setTimeout(r, 1000)); + } + } + startProfiler(); + + ``` + + + ### Running elsewhere + + You can still use `@google-cloud/profiler` if your application is running + outside of Google Cloud Platform, for example, running locally, on-premises, or + on another cloud provider. + + 1. You will need to specify your project ID and the service you want the + collected profiles to be associated with, and (optionally) the version of + the service when starting the profiler: + + ```js + await require('@google-cloud/profiler').start({ + projectId: 'project-id', + serviceContext: { + service: 'your-service', + version: '1.0.0' + } + }); + ``` + 2. You will need to provide credentials for your application. + + * If you are running your application on a development machine or test + environment where you are using the [`gcloud` command line tools][gcloud-sdk], + and are logged in using `gcloud beta auth application-default login`, you + already have sufficient credentials, and a service account key is not + required. + + * You can provide credentials via + [Application Default Credentials][app-default-credentials]. This is the + recommended method. + 1. [Create a new JSON service account key][service-account]. + 2. Copy the key somewhere your application can access it. Be sure not + to expose the key publicly. + 3. Set the environment variable `GOOGLE_APPLICATION_CREDENTIALS` to + the full path to the key. The profiler will automatically look for + this environment variable. + + * You may set the `keyFilename` or `credentials` configuration field to the + full path to or the contents of the key file, respectively. Setting either of + these fields will override both the `GOOGLE_APPLICATION_CREDENTIALS` + environment variable and any credentials obtained by logging in with `gcloud`. + + This is how you would set `keyFilename`: + ```js + await require('@google-cloud/profiler').start({ + projectId: 'project-id', + serviceContext: { + service: 'your-service', + version: '1.0.0' + }, + keyFilename: '/path/to/keyfile' + }); + ``` + + This is how you would set `credentials`: + ```js + await require('@google-cloud/profiler').start({ + projectId: 'project-id', + serviceContext: { + service: 'your-service', + version: '1.0.0' + }, + credentials: { + client_email: 'email', + private_key: 'private_key' + } + }); + ``` diff --git a/handwritten/cloud-profiler/.release-please-manifest.json b/handwritten/cloud-profiler/.release-please-manifest.json new file mode 100644 index 00000000000..dd0536b6c16 --- /dev/null +++ b/handwritten/cloud-profiler/.release-please-manifest.json @@ -0,0 +1 @@ +{".":"6.0.3"} diff --git a/handwritten/cloud-profiler/.repo-metadata.json b/handwritten/cloud-profiler/.repo-metadata.json new file mode 100644 index 00000000000..53abb7c52e0 --- /dev/null +++ b/handwritten/cloud-profiler/.repo-metadata.json @@ -0,0 +1,15 @@ +{ + "name": "profiler", + "name_pretty": "Cloud Profiler", + "product_documentation": "https://cloud.google.com/profiler/docs", + "client_documentation": "https://cloud.google.com/nodejs/docs/reference/profiler/latest", + "issue_tracker": "https://issuetracker.google.com/savedsearches/5116474", + "release_level": "stable", + "language": "nodejs", + "repo": "googleapis/google-cloud-node", + "distribution_name": "@google-cloud/profiler", + "api_id": "cloudprofiler.googleapis.com", + "codeowner_team": "@googleapis/api-profiler", + "api_shortname": "profiler", + "library_type": "AGENT" +} diff --git a/handwritten/cloud-profiler/CHANGELOG.md b/handwritten/cloud-profiler/CHANGELOG.md new file mode 100644 index
00000000000..09829684486 --- /dev/null +++ b/handwritten/cloud-profiler/CHANGELOG.md @@ -0,0 +1,348 @@ +# Changelog + +[npm history][1] + +[1]: https://www.npmjs.com/package/@google-cloud/profiler?activeTab=versions + +## [6.0.3](https://github.com/googleapis/cloud-profiler-nodejs/compare/v6.0.2...v6.0.3) (2025-04-14) + + +### Bug Fixes + +* Replace vulnerable parse-duration with CJS compatible ms library ([ef0f58c](https://github.com/googleapis/cloud-profiler-nodejs/commit/ef0f58c0ac7abf15dcf3b2a31cd0a4b9bfdb3b72)) + +## [6.0.2](https://github.com/googleapis/cloud-profiler-nodejs/compare/v6.0.1...v6.0.2) (2024-09-11) + + +### Bug Fixes + +* **deps:** Update dependency @google-cloud/logging-min to v11 ([#939](https://github.com/googleapis/cloud-profiler-nodejs/issues/939)) ([97ec3fb](https://github.com/googleapis/cloud-profiler-nodejs/commit/97ec3fbe9d254dc824b6df9eec6630b3acfd04c6)) +* **deps:** Update dependency protobufjs to ~7.3.0 ([#928](https://github.com/googleapis/cloud-profiler-nodejs/issues/928)) ([89eb3be](https://github.com/googleapis/cloud-profiler-nodejs/commit/89eb3be3c19e8f607d5a7764c8849a0f602f47dc)) +* **deps:** Update dependency protobufjs to ~7.4.0 ([#938](https://github.com/googleapis/cloud-profiler-nodejs/issues/938)) ([60b8264](https://github.com/googleapis/cloud-profiler-nodejs/commit/60b826405724d16c5173a1b9c6a852bd34222f4d)) + +## [6.0.1](https://github.com/googleapis/cloud-profiler-nodejs/compare/v6.0.0...v6.0.1) (2024-02-05) + + +### Bug Fixes + +* **deps:** Update dependency pprof to v4 ([#917](https://github.com/googleapis/cloud-profiler-nodejs/issues/917)) ([2411b71](https://github.com/googleapis/cloud-profiler-nodejs/commit/2411b7140e30fe89d10004010c3395aa18fbb543)) +* Lazily init logging ([#801](https://github.com/googleapis/cloud-profiler-nodejs/issues/801)) ([04853a9](https://github.com/googleapis/cloud-profiler-nodejs/commit/04853a96896b54694d6480c5263af7145ee129ea)) + +## [6.0.0](https://github.com/googleapis/cloud-profiler-nodejs/compare/v5.0.5...v6.0.0) (2023-08-15) + + +### ⚠ BREAKING CHANGES + +* upgrade to Node 14 ([#891](https://github.com/googleapis/cloud-profiler-nodejs/issues/891)) + +### Miscellaneous Chores + +* Upgrade to Node 14 ([#891](https://github.com/googleapis/cloud-profiler-nodejs/issues/891)) ([66b7f4e](https://github.com/googleapis/cloud-profiler-nodejs/commit/66b7f4e4beeaf09e7bb1107368997bfafd2a4679)) + +## [5.0.5](https://github.com/googleapis/cloud-profiler-nodejs/compare/v5.0.4...v5.0.5) (2023-07-24) + + +### Bug Fixes + +* **deps:** Upgrade pprof to v3.2.1 [security] ([#885](https://github.com/googleapis/cloud-profiler-nodejs/issues/885)) ([c140fe5](https://github.com/googleapis/cloud-profiler-nodejs/commit/c140fe5bca0f07ce775fd0f10b0aae1537962a1b)) + +## [5.0.4](https://github.com/googleapis/cloud-profiler-nodejs/compare/v5.0.3...v5.0.4) (2023-02-09) + + +### Bug Fixes + +* **deps:** Update dependency protobufjs to ~7.2.0 ([#868](https://github.com/googleapis/cloud-profiler-nodejs/issues/868)) ([e414a04](https://github.com/googleapis/cloud-profiler-nodejs/commit/e414a043e12d9d46b24fecdb761c434a3a225699)) +* Update go installation method ([#862](https://github.com/googleapis/cloud-profiler-nodejs/issues/862)) ([07f69a7](https://github.com/googleapis/cloud-profiler-nodejs/commit/07f69a7ed35ca1c438a63f4d2c7ea71e7f2b9882)) + +## [5.0.3](https://github.com/googleapis/cloud-profiler-nodejs/compare/v5.0.2...v5.0.3) (2022-09-09) + + +### Bug Fixes + +* **deps:** Update dependency protobufjs to ~7.1.0 
([#854](https://github.com/googleapis/cloud-profiler-nodejs/issues/854)) ([e8e725d](https://github.com/googleapis/cloud-profiler-nodejs/commit/e8e725d74d7e9578ac0fac0fb94774e833ae1979)) +* Remove pip install statements ([#1546](https://github.com/googleapis/cloud-profiler-nodejs/issues/1546)) ([#852](https://github.com/googleapis/cloud-profiler-nodejs/issues/852)) ([85ddb0f](https://github.com/googleapis/cloud-profiler-nodejs/commit/85ddb0f07cb40dab1e6284b80d5a921a596b2b9f)) + +## [5.0.2](https://github.com/googleapis/cloud-profiler-nodejs/compare/v5.0.1...v5.0.2) (2022-07-25) + + +### Bug Fixes + +* **deps:** update dependency protobufjs to v7 ([#849](https://github.com/googleapis/cloud-profiler-nodejs/issues/849)) ([5536eac](https://github.com/googleapis/cloud-profiler-nodejs/commit/5536eac0ac661d54fab58a236d561b259b2fe04f)) + +## [5.0.1](https://github.com/googleapis/cloud-profiler-nodejs/compare/v5.0.0...v5.0.1) (2022-07-08) + + +### Bug Fixes + +* fix build so it includes expected files (and modify test to catch this) ([6d9a5ca](https://github.com/googleapis/cloud-profiler-nodejs/commit/6d9a5caa276a7d41f6f3b0e9da8d576e261f3f3f)) + +## [5.0.0](https://github.com/googleapis/cloud-profiler-nodejs/compare/v4.2.0...v5.0.0) (2022-06-20) + + +### ⚠ BREAKING CHANGES + +* update library to use Node 12 (#835) + +### Bug Fixes + +* **deps:** update dependency @google-cloud/common to v4 ([#843](https://github.com/googleapis/cloud-profiler-nodejs/issues/843)) ([1af6b0f](https://github.com/googleapis/cloud-profiler-nodejs/commit/1af6b0f18aec665b042a27ea90158b99d7435d4e)) +* **deps:** update dependency @google-cloud/logging-min to v10 ([#838](https://github.com/googleapis/cloud-profiler-nodejs/issues/838)) ([1120298](https://github.com/googleapis/cloud-profiler-nodejs/commit/11202986b7d4501637e7c30c5e9787789e98ccba)) + + +### Build System + +* update library to use Node 12 ([#835](https://github.com/googleapis/cloud-profiler-nodejs/issues/835)) ([07221ef](https://github.com/googleapis/cloud-profiler-nodejs/commit/07221ef0503c25311834a50b987b70d51393420c)) + +## [4.2.0](https://github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.7...v4.2.0) (2022-05-13) + + +### Bug Fixes + +* initial module in golint presubmit check ([#818](https://github.com/googleapis/cloud-profiler-nodejs/issues/818)) ([df1f28a](https://github.com/googleapis/cloud-profiler-nodejs/commit/df1f28a6e0e74f9d5ee6751a203fe5610866b584)) +* library should released as 4.2.0 ([#833](https://github.com/googleapis/cloud-profiler-nodejs/issues/833)) ([8d722aa](https://github.com/googleapis/cloud-profiler-nodejs/commit/8d722aa0e5a3f03ea729db0a4d98b2a8c7717bcf)) +* relax service name requirements to allow starting with numbers ([#828](https://github.com/googleapis/cloud-profiler-nodejs/issues/828)) ([fe0f1cc](https://github.com/googleapis/cloud-profiler-nodejs/commit/fe0f1ccd14c2a091f1778142083f49813d724dcf)) + +### [4.1.7](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.6...v4.1.7) (2021-12-20) + + +### Bug Fixes + +* remove certification expiration workaround ([#781](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/781)) ([44af460](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/44af4604f0553ef917e48abf51fa3f52950ed6b7)) + +### [4.1.6](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.5...v4.1.6) (2021-12-20) + + +### Bug Fixes + +* log to console using structured logging ([#780](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/780)) 
([6b96e00](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/6b96e007418bd6074c2bf9de64df0742e6a6d4ae)) + +### [4.1.5](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.4...v4.1.5) (2021-10-06) + + +### Bug Fixes + +* workaround certificate expiration issue in integration tests ([#773](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/773)) ([9d4908b](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/9d4908b7161e0aade0915d2d88a43b0a6bfc9791)) + +### [4.1.4](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.3...v4.1.4) (2021-09-14) + + +### Bug Fixes + +* **build:** migrate to using main branch ([#760](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/760)) ([1649406](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/164940698d7f4da0b2b6ea9e3b059e553fcfaa42)) + +### [4.1.3](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.2...v4.1.3) (2021-07-20) + + +### Bug Fixes + +* **deps:** update dependency pprof to v3.2.0 ([#754](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/754)) ([fc48f9a](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/fc48f9aee74a3638d98086e0b404e299ae31b592)) + +### [4.1.2](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.1...v4.1.2) (2021-05-05) + + +### Bug Fixes + +* **deps:** update dependency pprof to v3.1.0 ([#731](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/731)) ([ba96e49](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/ba96e49a2d254d713d55f16bedbf6a5268500801)) +* **deps:** update dependency protobufjs to ~6.11.0 ([#733](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/733)) ([33abbeb](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/33abbebe1e6424658a66cdb728b93875d4edadd3)) + +### [4.1.1](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.1.0...v4.1.1) (2021-03-16) + + +### Bug Fixes + +* **deps:** update dependency delay to v5 ([#722](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/722)) ([98d43e1](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/98d43e12b2d19f7d8b70618b6d3ec5de6e8d702c)) +* **deps:** update dependency parse-duration to v1 ([#725](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/725)) ([fcd1239](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/fcd12391782ad9363cffae0c3b507512fc8dfbc8)) + +## [4.1.0](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.0.3...v4.1.0) (2020-11-02) + + +### Features + +* add support for Node 14 ([#709](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/709)) ([3b8f4ad](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/3b8f4adbb069c163864ba4e61e7980e78c057713)) + + +### Bug Fixes + +* Update engines to prevent agent from being used with versions of Node.js where v8 profilers have memory leaks ([#699](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/699)) ([160d1f6](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/160d1f68c657f6ce18b8cea921470d1b4482619f)) + + +### Reverts + +* Revert "fix: Update engines to prevent agent from getting used when it will cause memory leak." 
(#706) ([9141c2f](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/9141c2fc9373a8a8b26a577165a23a21835db779)), closes [#706](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/706) [#699](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/699) + +### [4.0.3](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.0.2...v4.0.3) (2020-09-12) + + +### Bug Fixes + +* move gitattributes files to node templates ([#679](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/679)) ([521e418](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/521e4180f825299907ad374412d81d65bb674596)) +* **deps:** update dependency parse-duration to 0.4.4 ([#668](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/668)) ([2757231](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/2757231e07492bc38bf25fe3add1274b36d2bdca)) + +### [4.0.2](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.0.1...v4.0.2) (2020-07-14) + + +### Bug Fixes + +* **deps:** update dependency protobufjs to ~6.10.0 ([#665](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/665)) ([dc4ed5c](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/dc4ed5c57e675b0a87e193b0ba012d5d22b85951)) + +### [4.0.1](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v4.0.0...v4.0.1) (2020-07-09) + + +### Bug Fixes + +* **deps:** update dependency pretty-ms to v7 ([#642](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/642)) ([f69c7a7](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/f69c7a73b17c150c2b523412e430b5d1ac03e12a)) +* **deps:** update dependency protobufjs to ~6.9.0 ([#634](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/634)) ([a90149c](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/a90149c4f91630d75bb41fab145713637736d21c)) +* malformed tsconfig causing broken tests [#640](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/640) ([#647](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/647)) ([09c19c8](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/09c19c88a0ca137b7970c386730b145b66b77ec3)) +* **deps:** update dependency teeny-request to v7 ([#652](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/652)) ([b46eb4f](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/b46eb4f2552871f405579100e4e916dbde5c60d8)) +* typeo in nodejs .gitattribute ([#661](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/661)) ([92f46ac](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/92f46ac22a510c7bab05549b83779dd4f60096b7)) + +## [4.0.0](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v3.0.1...v4.0.0) (2020-04-17) + + +### ⚠ BREAKING CHANGES + +* require node.js 10.x and up (#623) +* `start({...}) ` now throws an error when the profiling agent cannot be set up because required fields are not set in the config and cannot be determined based on metadata or environment variables. 
+ +### Features + +* add support for Node 12 ([#580](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/580)) ([e7cb85e](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/e7cb85efafb72f3f50acb48cafcc99e00cdd4616)) +* package is now GA ([#627](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/627)) ([1327ece](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/1327ece67da31c2fc5ab1e20f6481cfa8d207e63)) +* require the project ID to be set before starting the profiling agent ([#516](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/516)) ([5b46b66](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/5b46b66db3354bff611e2b34d51d40fc4e4befa5)) + + +### Bug Fixes + +* **deps:** TypeScript 3.7.0 causes breaking change in typings ([#564](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/564)) ([3ac08e5](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/3ac08e52a8f9dbb87aaa70324c2f5bc69f805b7c)) +* apache license URL ([#468](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/468)) ([#618](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/618)) ([1ae39be](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/1ae39bef02389451ecf5b120770c6b6c914e96b8)) +* **deps:** update dependency @google-cloud/common to v3 ([#613](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/613)) ([356dfa6](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/356dfa6d461be219d9937c3ca1430b6b0891bee1)) +* **deps:** update dependency @types/semver to v7 ([#587](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/587)) ([1400fda](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/1400fda2018331ceb41df9434f307b8046fdde89)) +* **deps:** update dependency gcp-metadata to v4 ([#609](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/609)) ([66875dd](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/66875dd259adcab7e18068bee1d76afa7dc641e9)) +* **deps:** update dependency pprof to v1.3.0 ([#595](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/595)) ([7ba1423](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/7ba14239d5096811c30148fa1f405765cc13f0d2)) +* **deps:** update dependency pprof to v2 ([#619](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/619)) ([37c7655](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/37c7655b227676af7a400e379d7b172f41f2592e)) +* **deps:** update dependency pretty-ms to v6 ([#596](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/596)) ([bed6c20](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/bed6c2007bd7224b827c3f8dcf8369c4d9b8ba8c)) +* **deps:** update dependency semver to v7 ([#567](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/567)) ([b6f80f6](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/b6f80f6819e6fc42b767ecc17594a933e4ff8316)) +* **docs:** snippets are now replaced in jsdoc comments ([#560](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/560)) ([07164f3](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/07164f372913255a0b84754668ea2216cc2a6944)) + + +### Build System + +* require node.js 10.x and up ([#623](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/623)) ([90d20bb](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/90d20bb566ed54237074af85dedb249653a06657)) + +### 
[3.0.1](https://www.github.com/googleapis/cloud-profiler-nodejs/compare/v3.0.0...v3.0.1) (2019-10-30) + + +### Bug Fixes + +* remove @google-cloud/common retries for CreateProfile and UpdateProfile requests ([#555](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/555)) ([09ef74d](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/09ef74d9a9a84d4c3a04edd9f93013cf8c56425b)) +* **deps:** update dependency pprof to v1.2.0 ([#557](https://www.github.com/googleapis/cloud-profiler-nodejs/issues/557)) ([664bf0e](https://www.github.com/googleapis/cloud-profiler-nodejs/commit/664bf0e948cfb898702a2c4198e8d30f0699862c)) + +## v3.0.0 + +09-17-2019 10:42 PDT + +### New Features +- feat(breaking!): support API endpoint override ([#509](https://github.com/googleapis/cloud-profiler-nodejs/pull/509)) +- chore: message to log agent version should be consistent with other agents ([#531](https://github.com/googleapis/cloud-profiler-nodejs/pull/531)) +- chore: log the agent's version on start-up ([#530](https://github.com/googleapis/cloud-profiler-nodejs/pull/530)) +- chore: log the agent's version on start-up ([#530](https://github.com/googleapis/cloud-profiler-nodejs/pull/530)) + +### Dependencies +- fix(deps): update dependency gcp-metadata to v3 ([#541](https://github.com/googleapis/cloud-profiler-nodejs/pull/541)) +- chore(deps): pin pprof module ([#539](https://github.com/googleapis/cloud-profiler-nodejs/pull/539)) +- chore(deps): update dependency nock to v11 ([#538](https://github.com/googleapis/cloud-profiler-nodejs/pull/538)) +- chore(deps): update dependency source-map to ^0.7.0 ([#537](https://github.com/googleapis/cloud-profiler-nodejs/pull/537)) +- Revert "chore(deps): update dependency nock to v11 ([#534](https://github.com/googleapis/cloud-profiler-nodejs/pull/534))" ([#535](https://github.com/googleapis/cloud-profiler-nodejs/pull/535)) +- chore(deps): update dependency nock to v11 ([#534](https://github.com/googleapis/cloud-profiler-nodejs/pull/534)) +- chore(deps): update dependency typescript to ~3.6.0 ([#532](https://github.com/googleapis/cloud-profiler-nodejs/pull/532)) +- fix(dep): update to be compatible with @google-cloud/common 2.1.X ([#529](https://github.com/googleapis/cloud-profiler-nodejs/pull/529)) +- fix(deps): use the latest extend ([#523](https://github.com/googleapis/cloud-profiler-nodejs/pull/523)) + +### Documentation +- docs: use the jsdoc-fresh theme ([#519](https://github.com/googleapis/cloud-profiler-nodejs/pull/519)) +- docs: note support for Node 12 in documentation ([#540](https://github.com/googleapis/cloud-profiler-nodejs/pull/540)) + + +### Internal / Testing Changes +- build: add Node 12 remove Node 11 ([#520](https://github.com/googleapis/cloud-profiler-nodejs/pull/520)) +- update .nycrc ignore rules ([#536](https://github.com/googleapis/cloud-profiler-nodejs/pull/536)) +- chore: confirm zone is set in e2e test ([#518](https://github.com/googleapis/cloud-profiler-nodejs/pull/518)) +- build: use config file for linkinator ([#517](https://github.com/googleapis/cloud-profiler-nodejs/pull/517)) + +## v2.0.2 + +06-26-2019 08:59 PDT + +### Dependencies +- chore(deps): update dependency typescript to ~3.5.0 ([#498](https://github.com/googleapis/cloud-profiler-nodejs/pull/498)) +- chore(deps): update dependency js-green-licenses to v1 ([#503](https://github.com/googleapis/cloud-profiler-nodejs/pull/503)) +- chore(deps): update sinon and @types/sinon ([#504](https://github.com/googleapis/cloud-profiler-nodejs/pull/504)) +- fix(deps): update 
dependency pprof to v1 ([#487](https://github.com/googleapis/cloud-profiler-nodejs/pull/487)) + +### Documentation +- fix(docs): make anchors work in jsdoc ([#513](https://github.com/googleapis/cloud-profiler-nodejs/pull/513)) +- docs: add repo-metadata for docs ([#511](https://github.com/googleapis/cloud-profiler-nodejs/pull/511)) + +### Internal / Testing Changes +- build: switch to GitHub magic proxy ([#510](https://github.com/googleapis/cloud-profiler-nodejs/pull/510)) +- build: set correct src path for template ([#508](https://github.com/googleapis/cloud-profiler-nodejs/pull/508)) +- build: use standard kokoro configuration ([#502](https://github.com/googleapis/cloud-profiler-nodejs/pull/502)) +- chore: run gts fix ([#506](https://github.com/googleapis/cloud-profiler-nodejs/pull/506)) +- build: share some code for E2E tests ([#505](https://github.com/googleapis/cloud-profiler-nodejs/pull/505)) +- build: don't run test scripts in verbose mode ([#501](https://github.com/googleapis/cloud-profiler-nodejs/pull/501)) +- build: ignore proto files in test coverage ([#497](https://github.com/googleapis/cloud-profiler-nodejs/pull/497)) + +## v2.0.1 + +05-23-2019 13:08 PDT + +### Implementation Changes +- fix: agent should correctly set zone in GCP ([#489](https://github.com/googleapis/cloud-profiler-nodejs/pull/489)) + +### Dependencies +- fix(deps): remove unused dependencies ([#494](https://github.com/googleapis/cloud-profiler-nodejs/pull/494)) +- refactor: drop dependency on pify ([#493](https://github.com/googleapis/cloud-profiler-nodejs/pull/493)) + +### Documentation +- doc: remove reference to Node 6 from documentation ([#485](https://github.com/googleapis/cloud-profiler-nodejs/pull/485)) + +### Internal / Testing Changes +- chore: retry npm install in system test when the command hangs ([#491](https://github.com/googleapis/cloud-profiler-nodejs/pull/491)) +- chore: remove unused third_party directory ([#486](https://github.com/googleapis/cloud-profiler-nodejs/pull/486)) + +## v2.0.0 + +05-14-2019 13:21 PDT + +### Implementation Changes +- build: remove support for node 6 ([#472](https://github.com/googleapis/cloud-profiler-nodejs/pull/472)) + +### Dependencies +- fix(deps): update dependency gcp-metadata to v2 ([#481](https://github.com/googleapis/cloud-profiler-nodejs/pull/481)) +- fix(deps): update dependency @google-cloud/common to v1 ([#482](https://github.com/googleapis/cloud-profiler-nodejs/pull/482)) +- fix(deps): update dependency pretty-ms to v5 ([#464](https://github.com/googleapis/cloud-profiler-nodejs/pull/464)) +- chore(deps): update dependency gts to v1 ([#474](https://github.com/googleapis/cloud-profiler-nodejs/pull/474)) +- fix(deps): update dependency gaxios to v2 ([#469](https://github.com/googleapis/cloud-profiler-nodejs/pull/469)) +- chore(deps): update dependency @types/nock to v10 ([#470](https://github.com/googleapis/cloud-profiler-nodejs/pull/470)) +- chore(dep): update mocha to 6.1 ([#461](https://github.com/googleapis/cloud-profiler-nodejs/pull/461)) + +### Documentation +- Specify default logging level ([#483](https://github.com/googleapis/cloud-profiler-nodejs/pull/483)) + +### Internal / Testing Changes +- build: allow Node 10 to push to codecov ([#480](https://github.com/googleapis/cloud-profiler-nodejs/pull/480)) +- build: patch Windows container, fixing Node 10 ([#479](https://github.com/googleapis/cloud-profiler-nodejs/pull/479)) +- Revert "build!: upgrade engines field to >=8.10.0 ([#476](https://github.com/googleapis/cloud-profiler-nodejs/pull/476))" 
([#477](https://github.com/googleapis/cloud-profiler-nodejs/pull/477)) +- build!: upgrade engines field to >=8.10.0 ([#476](https://github.com/googleapis/cloud-profiler-nodejs/pull/476)) +- chore: update to .nycrc with --all enabled ([#473](https://github.com/googleapis/cloud-profiler-nodejs/pull/473)) +- fix: update dependencies for npm audit ([#468](https://github.com/googleapis/cloud-profiler-nodejs/pull/468)) +- fix: add es2015 as target for tsconfig.json ([#466](https://github.com/googleapis/cloud-profiler-nodejs/pull/466)) +- chore: remove old kokoro configs ([#458](https://github.com/googleapis/cloud-profiler-nodejs/pull/458)) + +## v1.1.2 + +04-04-2019 13:05 PDT + +No changes since last release. + +Confirming that we can use [releasetool](https://github.com/googleapis/releasetool) to release the agent going forward. diff --git a/handwritten/cloud-profiler/CODE_OF_CONDUCT.md b/handwritten/cloud-profiler/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..2add2547a81 --- /dev/null +++ b/handwritten/cloud-profiler/CODE_OF_CONDUCT.md @@ -0,0 +1,94 @@ + +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + +Reports should be directed to *googleapis-stewards@google.com*, the +Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html \ No newline at end of file diff --git a/handwritten/cloud-profiler/CONTRIBUTING.md b/handwritten/cloud-profiler/CONTRIBUTING.md new file mode 100644 index 00000000000..b4169bb89a4 --- /dev/null +++ b/handwritten/cloud-profiler/CONTRIBUTING.md @@ -0,0 +1,75 @@ +# How to become a contributor and submit your own code + +**Table of contents** + +* [Contributor License Agreements](#contributor-license-agreements) +* [Contributing a patch](#contributing-a-patch) +* [Running the tests](#running-the-tests) +* [Releasing the library](#releasing-the-library) + +## Contributor License Agreements + +We'd love to accept your sample apps and patches! Before we can take them, we +have to jump a couple of legal hurdles. + +Please fill out either the individual or corporate Contributor License Agreement +(CLA). + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual CLA](https://developers.google.com/open-source/cla/individual). + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA](https://developers.google.com/open-source/cla/corporate). + +Follow either of the two links above to access the appropriate CLA and +instructions for how to sign and return it. 
Once we receive it, we'll be able to +accept your pull requests. + +## Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The repo owner will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a + Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Ensure that your code adheres to the existing style in the code to which + you are contributing. +1. Ensure that your code has an appropriate set of tests which all pass. +1. Title your pull request following [Conventional Commits](https://www.conventionalcommits.org/) styling. +1. Submit a pull request. + +### Before you begin + +1. [Select or create a Cloud Platform project][projects]. +1. [Enable the Cloud Profiler API][enable_api]. +1. [Set up authentication with a service account][auth] so you can access the + API from your local workstation. + + +## Running the tests + +1. [Prepare your environment for Node.js setup][setup]. + +1. Install dependencies: + + npm install + +1. Run the tests: + + # Run unit tests. + npm test + + # Run sample integration tests. + npm run samples-test + + # Run all system tests. + npm run system-test + +1. Lint (and maybe fix) any changes: + + npm run fix + +[setup]: https://cloud.google.com/nodejs/docs/setup +[projects]: https://console.cloud.google.com/project +[billing]: https://support.google.com/cloud/answer/6293499#enable-billing +[enable_api]: https://console.cloud.google.com/flows/enableapi?apiid=cloudprofiler.googleapis.com +[auth]: https://cloud.google.com/docs/authentication/getting-started \ No newline at end of file diff --git a/handwritten/cloud-profiler/LICENSE b/handwritten/cloud-profiler/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/handwritten/cloud-profiler/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/handwritten/cloud-profiler/README.md b/handwritten/cloud-profiler/README.md new file mode 100644 index 00000000000..7f9ab909cef --- /dev/null +++ b/handwritten/cloud-profiler/README.md @@ -0,0 +1,408 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." +Google Cloud Platform logo + +# [Cloud Profiler: Node.js Client](https://github.com/googleapis/cloud-profiler-nodejs) + +[![release level](https://img.shields.io/badge/release%20level-stable-brightgreen.svg?style=flat)](https://cloud.google.com/terms/launch-stages) +[![npm version](https://img.shields.io/npm/v/@google-cloud/profiler.svg)](https://www.npmjs.org/package/@google-cloud/profiler) + + + + +Adds support for Cloud Profiler to Node.js applications + + +A comprehensive list of changes in each version may be found in +[the CHANGELOG](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/CHANGELOG.md). 
+ +* [Cloud Profiler Node.js Client API Reference][client-docs] +* [Cloud Profiler Documentation][product-docs] +* [github.com/googleapis/cloud-profiler-nodejs](https://github.com/googleapis/cloud-profiler-nodejs) + +Read more about the client libraries for Cloud APIs, including the older +Google APIs Client Libraries, in [Client Libraries Explained][explained]. + +[explained]: https://cloud.google.com/apis/docs/client-libraries-explained + +**Table of contents:** + + +* [Quickstart](#quickstart) + * [Before you begin](#before-you-begin) + * [Installing the client library](#installing-the-client-library) + +* [Samples](#samples) +* [Versioning](#versioning) +* [Contributing](#contributing) +* [License](#license) + +## Quickstart + +### Before you begin + +1. [Select or create a Cloud Platform project][projects]. +1. [Enable the Cloud Profiler API][enable_api]. +1. [Set up authentication][auth] so you can access the + API from your local workstation. + +### Installing the client library + +```bash +npm install @google-cloud/profiler +``` + + +### Prerequisites + +1. Your application will need to be using Node.js version between 14 and 20. + +1. `@google-cloud/profiler` depends on the +[`pprof`](https://www.npmjs.com/package/pprof) module, a module with a native +component that is used to collect profiles with v8's CPU and Heap profilers. +You may need to install additional dependencies to build the `pprof` module. + * For Linux: `pprof` has prebuilt binaries available for Linux and Alpine + Linux for Node 14 and 16. No additional dependencies are required. + * For other environments: when using `@google-cloud/profiler` on environments + that `pprof` does not have prebuilt binaries for, the module + [`node-gyp`](https://www.npmjs.com/package/node-gyp) will be used to + build binaries. See `node-gyp`'s + [documentation](https://github.com/nodejs/node-gyp#installation) + for information on dependencies required to build binaries with `node-gyp`. + +1. You will need a project in the [Google Developers Console][cloud-console]. +Your application can run anywhere, but the profiler data is associated with a +particular project. + +1. You will need to enable the Cloud Profiler API for your project. + +### Basic Set-up + +1. Install `@google-cloud/profiler` with [`npm`](https://www.npmjs.com) or add +to your [`package.json`](https://docs.npmjs.com/files/package.json#dependencies). + + ```sh + # Install through npm while saving to the local 'package.json' + npm install --save @google-cloud/profiler + ``` + +2. Include and start the profiler at the beginning of your application: + + ```js + require('@google-cloud/profiler').start().catch((err) => { + console.log(`Failed to start profiler: ${err}`); + }); + ``` + + Some environments require a configuration to be passed to the `start()` + function. For more details on this, see instructions for running + [outside of Google Cloud Platform](#running-elsewhere), on + [App Engine flexible environment](#running-on-app-engine-flexible-environment), + on [Google Compute Engine](#running-on-google-compute-engine), + and on [Google Container Engine](#running-on-google-container-engine). + +3. If you are running your application locally, or on a machine where you are +using the [Google Cloud SDK][gcloud-sdk], make sure to log in with the +application default credentials: + + ```sh + gcloud beta auth application-default login + ``` + + Alternatively, you can set `GOOGLE_APPLICATION_CREDENTIALS`. 
For more + details on this, see [Running elsewhere](#running-elsewhere) + +### Configuration + +See [the default configuration](https://github.com/googleapis/cloud-profiler-nodejs/blob/master/src/config.ts) for a list of possible +configuration options. These options can be passed to the agent through the +object argument to the start command shown below: + +```js +await require('@google-cloud/profiler').start({disableTime: true}); +``` + +Alternatively, you can provide the configuration through a config file. This +can be useful if you want to load our module using `--require` on the command +line (which requires and starts the agent) instead of editing your main script. +The `GCLOUD_PROFILER_CONFIG` environment variable should point to your +configuration file. + +```bash +export GCLOUD_PROFILER_CONFIG=./path/to/your/profiler/configuration.js +``` + +#### Changing log level + +The profiler writes log statements to the console log for diagnostic purposes. +By default, the log level is set to warn. You can adjust this by setting +`logLevel` in the config. Setting `logLevel` to 0 will disable logging, +1 sets log level to error, 2 sets it to warn (default), 3 sets it to info, +and 4 sets it to debug. + +So, for example, to start the profiler with the log level at debug, you would +do this: + +```js +await require('@google-cloud/profiler').start({logLevel: 4}); +``` + +#### Disabling heap or time profile collection + +By default, the profiler collects both heap profiles, which show memory +allocations, and time profiles, which capture how much wall-clock time is spent +in different locations of the code. Using the configuration, it is possible to +disable the collection of either type of profile. + +To disable time profile collection, set `disableTime` to true: + +```js +await require('@google-cloud/profiler').start({disableTime: true}); +``` + +To disable heap profile collection, set `disableHeap` to true: + +```js +await require('@google-cloud/profiler').start({disableHeap: true}); +``` + +### Running on Google Cloud Platform + +There are three different services that can host Node.js applications within +Google Cloud Platform: Google App Engine flexible environment, Google Compute +Engine, and Google Container Engine. After installing `@google-cloud/profiler` +in your project and ensuring that the environment you are using uses a +supported version of Node.js, follow the service-specific instructions to +enable the profiler. + +#### Running on App Engine flexible environment + +To enable the profiling agent for a Node.js program running in the App Engine +flexible environment, import the agent at the top of your application’s main +script or entry point by including the following code snippet: + +```js +require('@google-cloud/profiler').start(); +``` + +You can specify which version of Node.js you're using by adding a snippet like +the following to your `package.json`: + +```json +"engines": { + "node": ">=14.0.0" +} +``` +The above snippet will ensure that you're using 14.0.0 or greater. + +Deploy your application to App Engine Flexible environment as usual. 
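+ +On App Engine flexible environment, the agent can usually determine the service +name and version on its own from the `GAE_SERVICE` and `GAE_VERSION` environment +variables, which is why no configuration is passed to `start()` above. As a +minimal sketch (the service name below is a placeholder, not a value the agent +requires), you could still pass an explicit `serviceContext` and log a start-up +failure: + +```js +// Optional: override the service/version that would otherwise be read from +// the GAE_SERVICE / GAE_VERSION environment variables, and log start errors. +require('@google-cloud/profiler').start({ +  serviceContext: { +    service: 'my-flex-service', // placeholder service name +    version: '1.0.0', +  }, +}).catch((err) => { +  console.log(`Failed to start profiler: ${err}`); +}); +```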
+ +#### Running on Google Compute Engine + +To enable the profiling agent for a Node.js program running in the Google +Compute Engine environment, import the agent at the top of your application’s +main script or entry point by including the following code snippet: + +```js +require('@google-cloud/profiler').start({ +serviceContext: { + service: 'your-service', + version: '1.0.0' +} +}); +``` + +#### Running on Google Container Engine + +To enable the profiling agent for a Node.js program running in the Google +Container Engine environment, import the agent at the top of your application’s +main script or entry point by including the following code snippet: + +```js +require('@google-cloud/profiler').start({ +serviceContext: { + service: 'your-service', + version: '1.0.0' +} +}); +``` + +#### Running on Istio + +On Istio, the GCP Metadata server may not be available for a few seconds after +your application has started. When this occurs, the profiling agent may fail +to start because it cannot initialize required fields. You can retry +starting the profiler with a snippet like the following: + +```js +const profiler = require('@google-cloud/profiler'); +async function startProfiler() { +for (let i = 0; i < 3; i++) { + try { + await profiler.start({ + serviceContext: { + service: 'your-service', + version: '1.0.0', + }, + }); + // The profiler started successfully, so stop retrying. + return; + } catch(e) { + console.log(`Failed to start profiler: ${e}`); + } + + // Wait for 1 second before trying again. + await new Promise(r => setTimeout(r, 1000)); +} +} +startProfiler(); + +``` + + +### Running elsewhere + +You can still use `@google-cloud/profiler` if your application is running +outside of Google Cloud Platform, for example, running locally, on-premises, or +on another cloud provider. + +1. You will need to specify your project ID and the service you want the +collected profiles to be associated with, and (optionally) the version of +the service when starting the profiler: + +```js + await require('@google-cloud/profiler').start({ + projectId: 'project-id', + serviceContext: { + service: 'your-service', + version: '1.0.0' + } + }); +``` +2. You will need to provide credentials for your application. + +* If you are running your application on a development machine or test +environment where you are using the [`gcloud` command line tools][gcloud-sdk], +and are logged in using `gcloud beta auth application-default login`, you +already have sufficient credentials, and a service account key is not +required. + +* You can provide credentials via +[Application Default Credentials][app-default-credentials]. This is the +recommended method. + 1. [Create a new JSON service account key][service-account]. + 2. Copy the key somewhere your application can access it. Be sure not + to expose the key publicly. + 3. Set the environment variable `GOOGLE_APPLICATION_CREDENTIALS` to + the full path to the key. The profiler will automatically look for + this environment variable. + +* You may set the `keyFilename` or `credentials` configuration field to the +full path to, or the contents of, the key file, respectively. Setting either of these +fields will override either setting `GOOGLE_APPLICATION_CREDENTIALS` or +logging in using `gcloud`.
+ + This is how you would set `keyFilename`: + ```js + await require('@google-cloud/profiler').start({ + projectId: 'project-id', + serviceContext: { + service: 'your-service', + version: '1.0.0' + }, + keyFilename: '/path/to/keyfile' + }); + ``` + + This is how you would set `credentials`: + ```js + await require('@google-cloud/profiler').start({ + projectId: 'project-id', + serviceContext: { + service: 'your-service', + version: '1.0.0' + }, + credentials: { + client_email: 'email', + private_key: 'private_key' + } + }); + ``` + + +## Samples + +Samples are in the [`samples/`](https://github.com/googleapis/cloud-profiler-nodejs/tree/main/samples) directory. Each sample's `README.md` has instructions for running its sample. + +| Sample | Source Code | Try it | +| --------------------------- | --------------------------------- | ------ | +| App | [source code](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/samples/app.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/cloud-profiler-nodejs&page=editor&open_in_editor=samples/app.js,samples/README.md) | +| Snippets | [source code](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/samples/snippets.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/cloud-profiler-nodejs&page=editor&open_in_editor=samples/snippets.js,samples/README.md) | + + + +The [Cloud Profiler Node.js Client API Reference][client-docs] documentation +also contains samples. + +## Supported Node.js Versions + +Our client libraries follow the [Node.js release schedule](https://github.com/nodejs/release#release-schedule). +Libraries are compatible with all current _active_ and _maintenance_ versions of +Node.js. +If you are using an end-of-life version of Node.js, we recommend that you update +as soon as possible to an actively supported LTS version. + +Google's client libraries support legacy versions of Node.js runtimes on a +best-efforts basis with the following warnings: + +* Legacy versions are not tested in continuous integration. +* Some security patches and features cannot be backported. +* Dependencies cannot be kept up-to-date. + +Client libraries targeting some end-of-life versions of Node.js are available, and +can be installed through npm [dist-tags](https://docs.npmjs.com/cli/dist-tag). +The dist-tags follow the naming convention `legacy-(version)`. +For example, `npm install @google-cloud/profiler@legacy-8` installs client libraries +for versions compatible with Node.js 8. + +## Versioning + +This library follows [Semantic Versioning](http://semver.org/). + + + +This library is considered to be **stable**. The code surface will not change in backwards-incompatible ways +unless absolutely necessary (e.g. because of critical security issues) or with +an extensive deprecation period. Issues and requests against **stable** libraries +are addressed with the highest priority. + + + + + + +More Information: [Google Cloud Platform Launch Stages][launch_stages] + +[launch_stages]: https://cloud.google.com/terms/launch-stages + +## Contributing + +Contributions welcome! See the [Contributing Guide](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/CONTRIBUTING.md). + +Please note that this `README.md`, the `samples/README.md`, +and a variety of configuration files in this repository (including `.nycrc` and `tsconfig.json`) +are generated from a central template. 
To edit one of these files, make an edit +to its templates in +[directory](https://github.com/googleapis/synthtool). + +## License + +Apache Version 2.0 + +See [LICENSE](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/LICENSE) + +[client-docs]: https://cloud.google.com/nodejs/docs/reference/profiler/latest +[product-docs]: https://cloud.google.com/profiler/docs +[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png +[projects]: https://console.cloud.google.com/project +[billing]: https://support.google.com/cloud/answer/6293499#enable-billing +[enable_api]: https://console.cloud.google.com/flows/enableapi?apiid=cloudprofiler.googleapis.com +[auth]: https://cloud.google.com/docs/authentication/external/set-up-adc-local diff --git a/handwritten/cloud-profiler/linkinator.config.json b/handwritten/cloud-profiler/linkinator.config.json new file mode 100644 index 00000000000..fbf540f0108 --- /dev/null +++ b/handwritten/cloud-profiler/linkinator.config.json @@ -0,0 +1,11 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "circleci.com/gh/googleapis/cloud-profiler-nodejs", + "img.shields.io" + ], + "silent": true, + "concurrency": 10 +} diff --git a/handwritten/cloud-profiler/owlbot.py b/handwritten/cloud-profiler/owlbot.py new file mode 100644 index 00000000000..47859bd630a --- /dev/null +++ b/handwritten/cloud-profiler/owlbot.py @@ -0,0 +1,18 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import synthtool.languages.node_mono_repo as node + +node.owlbot_main(relative_dir="handwritten/cloud-profiler",templates_excludes=[".github/sync-repo-settings.yaml", + ".github/workflows/ci.yaml"]) diff --git a/handwritten/cloud-profiler/package.json b/handwritten/cloud-profiler/package.json new file mode 100644 index 00000000000..9c071f1eff8 --- /dev/null +++ b/handwritten/cloud-profiler/package.json @@ -0,0 +1,89 @@ +{ + "name": "@google-cloud/profiler", + "version": "6.0.3", + "description": "Adds support for Cloud Profiler to Node.js applications", + "repository": { + "type": "git", + "directory": "handwritten/cloud-profiler", + "url": "https://github.com/googleapis/google-cloud-node.git" + }, + "main": "build/src/index.js", + "types": "build/src/index.d.ts", + "scripts": { + "test": "echo 'skipping tests for time being'", + "system-test": "echo 'skipping tests for time being'", + "samples-test": "echo 'no sample tests'", + "clean": "gts clean", + "compile": "tsc -p .", + "fix": "gts fix", + "lint": "gts check", + "docs": "jsdoc -c .jsdoc.js", + "prelint": "cd samples; npm link ../; npm install", + "prepare": "npm run compile", + "pretest": "npm run compile", + "license-check": "jsgl --local .", + "docs-test": "echo 'skipping tests for time being'", + "predocs-test": "npm run docs", + "precompile": "gts clean" + }, + "author": { + "name": "Google Inc." 
+ }, + "license": "Apache-2.0", + "dependencies": { + "@google-cloud/common": "^5.0.0", + "@google-cloud/logging-min": "^11.0.0", + "@google-cloud/promisify": "~4.0.0", + "@types/console-log-level": "^1.4.0", + "@types/semver": "^7.0.0", + "console-log-level": "^1.4.0", + "delay": "^5.0.0", + "extend": "^3.0.2", + "gcp-metadata": "^6.0.0", + "ms": "^2.1.3", + "pprof": "4.0.0", + "pretty-ms": "^7.0.0", + "protobufjs": "~7.4.0", + "semver": "^7.0.0", + "teeny-request": "^9.0.0" + }, + "devDependencies": { + "@types/extend": "^3.0.0", + "@types/long": "^5.0.0", + "@types/mocha": "^9.0.0", + "@types/ms": "^2.1.0", + "@types/nock": "^11.0.0", + "@types/node": "^20.0.0", + "@types/pretty-ms": "^5.0.0", + "@types/sinon": "^17.0.0", + "@types/tmp": "0.2.6", + "c8": "^9.0.0", + "codecov": "^3.0.0", + "gts": "^5.0.0", + "js-green-licenses": "^4.0.0", + "jsdoc": "^4.0.0", + "jsdoc-fresh": "^3.0.0", + "jsdoc-region-tag": "^3.0.0", + "linkinator": "^5.0.0", + "mocha": "^9.2.2", + "nock": "^13.0.0", + "sinon": "^18.0.0", + "source-map": "^0.7.0", + "tmp": "0.2.3", + "typescript": "5.1.6" + }, + "files": [ + "build/src", + "build/third_party/cloud-debug-nodejs" + ], + "nyc": { + "exclude": [ + "build/test", + "build/system-test" + ] + }, + "engines": { + "node": ">=14.0.0" + }, + "homepage": "https://github.com/googleapis/google-cloud-node/tree/main/handwritten/cloud-profiler" +} diff --git a/handwritten/cloud-profiler/samples/README.md b/handwritten/cloud-profiler/samples/README.md new file mode 100644 index 00000000000..02a26cf79c9 --- /dev/null +++ b/handwritten/cloud-profiler/samples/README.md @@ -0,0 +1,68 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." +Google Cloud Platform logo + +# [Cloud Profiler: Node.js Samples](https://github.com/googleapis/cloud-profiler-nodejs) + +[![Open in Cloud Shell][shell_img]][shell_link] + + + +## Table of Contents + +* [Before you begin](#before-you-begin) +* [Samples](#samples) + * [App](#app) + * [Snippets](#snippets) + +## Before you begin + +Before running the samples, make sure you've followed the steps outlined in +[Using the client library](https://github.com/googleapis/cloud-profiler-nodejs#using-the-client-library). + +`cd samples` + +`npm install` + +`cd ..` + +## Samples + + + +### App + +View the [source code](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/samples/app.js). + +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/cloud-profiler-nodejs&page=editor&open_in_editor=samples/app.js,samples/README.md) + +__Usage:__ + + +`node samples/app.js` + + +----- + + + + +### Snippets + +View the [source code](https://github.com/googleapis/cloud-profiler-nodejs/blob/main/samples/snippets.js). 
+ +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/cloud-profiler-nodejs&page=editor&open_in_editor=samples/snippets.js,samples/README.md) + +__Usage:__ + + +`node samples/snippets.js` + + + + + + +[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/cloud-profiler-nodejs&page=editor&open_in_editor=samples/README.md +[product-docs]: https://cloud.google.com/profiler/docs diff --git a/handwritten/cloud-profiler/samples/app.js b/handwritten/cloud-profiler/samples/app.js new file mode 100644 index 00000000000..213e3b3e1ef --- /dev/null +++ b/handwritten/cloud-profiler/samples/app.js @@ -0,0 +1,19 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +// [START profiler_setup_nodejs_app_engine] +require('@google-cloud/profiler').start(); +// [END profiler_setup_nodejs_app_engine] diff --git a/handwritten/cloud-profiler/samples/package.json b/handwritten/cloud-profiler/samples/package.json new file mode 100644 index 00000000000..bbbe3e5fa69 --- /dev/null +++ b/handwritten/cloud-profiler/samples/package.json @@ -0,0 +1,25 @@ +{ + "name": "cloud-profiler-samples", + "version": "0.1.0", + "private": true, + "description": "Google Cloud Profiler samples", + "main": "app.js", + "scripts": { + "test": "echo 'no test yet'" + }, + "engines": { + "node": ">=14.0.0" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/googleapis/cloud-profiler-nodejs.git" + }, + "author": "Google LLC.", + "license": "Apache-2.0", + "dependencies": { + "@google-cloud/profiler": "^6.0.3" + }, + "files": [ + "*.js" + ] +} \ No newline at end of file diff --git a/handwritten/cloud-profiler/samples/snippets.js b/handwritten/cloud-profiler/samples/snippets.js new file mode 100644 index 00000000000..4a32b1a4dcb --- /dev/null +++ b/handwritten/cloud-profiler/samples/snippets.js @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +'use strict'; + +// [START profiler_setup_nodejs_compute_engine] +require('@google-cloud/profiler').start({ + serviceContext: { + service: 'your-service', + version: '1.0.0', + }, +}); +// [END profiler_setup_nodejs_compute_engine] diff --git a/handwritten/cloud-profiler/src/config.ts b/handwritten/cloud-profiler/src/config.ts new file mode 100644 index 00000000000..e68d263ff5c --- /dev/null +++ b/handwritten/cloud-profiler/src/config.ts @@ -0,0 +1,202 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import {GoogleAuthOptions} from '@google-cloud/common'; +import * as ms from 'ms'; + +// Configuration for Profiler. +export interface Config extends GoogleAuthOptions { + /** + * The API endpoint of the service used to make requests. + * Defaults to `cloudprofiler.googleapis.com`. + */ + apiEndpoint?: string; + + // Cloud Console projectId to associate profiles with instead of one read + // from VM metadata server. + projectId?: string; + + // Log levels: 0-disabled,1-error,2-warn,3-info,4-debug. + // Log statements are printed to the console. + logLevel?: number; + + // Specifies the service with which profiles from this application will be + // associated. + serviceContext?: { + // Name of the service under which the profiled data will be recorded and + // exposed in the UI for the project. + // You can specify an arbitrary string, see deployment.target at + // https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudprofiler/v2/profiler.proto + // for restrictions. + // The string should be the same across different replicas of your service + // so that a globally constant profiling rate is maintained. + service?: string; + + // Version of the service. It can be an arbitrary string. Cloud Profiler + // profiles each version of each service in each zone once per minute. + version?: string; + }; + + // Virtual machine instance to associate profiles with instead of the one + // read from the VM metadata server. + instance?: string; + + // Zone to associate profiles with instead of the one read from the VM + // metadata server. + zone?: string; + + // When true, time profiling will be disabled. + disableTime?: boolean; + + // When true, heap profiling will be disabled. + disableHeap?: boolean; + + // Average time between samples collected by time profiler. + // Increasing the time between samples will reduce quality of profiles by + // reducing number of samples. + // Decreasing time between samples may increase overhead of profiling. + timeIntervalMicros?: number; + + // Average bytes between samples collected by heap profiler. + // Increasing the bytes between samples will reduce quality of profiles by + // reducing number of samples. + // Decreasing bytes between samples may increase overhead of profiling. + heapIntervalBytes?: number; + + // Maximum depth of stacks recorded for heap samples. Decreasing stack depth + // will make it more likely that stack traces are truncated. 
Increasing + // stack depth may increase overhead of profiling. + heapMaxStackDepth?: number; + + // Samples with stacks with any location containing this as a substring + // in their file name will not be included in heap profiles. + // By default this is set to "@google-cloud/profiler" to exclude samples from + // the profiler. + ignoreHeapSamplesPath?: string; + + // On each consecutive error in profile creation, the backoff envelope will + // increase by this factor. The backoff will be a random value selected + // from a uniform distribution between 0 and the backoff envelope. + backoffMultiplier?: number; + + // On first error during profile creation, if the backoff is not specified + // by the server response, then the profiler will wait between 0 and + // initialBackoffMillis before asking the server to create a profile again. + // After a successful profile creation, the backoff envelope will be reset to + // initialBackoffMillis. + initialBackoffMillis?: number; + + // If the backoff is not specified by the server response, then the profiler + // will wait at most backoffCapMillis before asking the server to create a + // profile again. + backoffCapMillis?: number; + + // Server-specified backoffs will be capped at serverBackoffCapMillis. + // The backoff is capped here because setTimeout (which is used to control + // when the next profile is collected) will run immediately if the backoff is + // too large. + // https://nodejs.org/dist/latest-v9.x/docs/api/timers.html#timers_settimeout_callback_delay_args. + serverBackoffCapMillis?: number; + + // Time between profile collection. + // For testing with startLocal() only. + localProfilingPeriodMillis?: number; + + // Debugging information for startLocal will be recorded every + // localLogPeriodMillis milliseconds. + // For testing with startLocal() only. + localLogPeriodMillis?: number; + + // Duration of time profiles collected when using startLocal(). + // For testing with startLocal() only. + localTimeDurationMillis?: number; + + // List of directories recursively searched for *.js.map files. Defaults to + // process.cwd(). + // + // The profiler uses these files to re-map the source file paths in the + // profiles. The most common use case of having a source map is an application + // written in TypeScript: the source file paths that the profiler observes in + // the profiling data are the transpiled *.js files; to attribute the data + // back to the TypeScript source that the developer wrote, a source map needs + // to be generated, distributed, and used. + // + // The source map of the application typically resides directly in + // process.cwd(), so the default value should work well pretty much always. + // The node_modules directory is not searched for source maps, so + // source maps for dependencies will not be used. + sourceMapSearchPath?: string[]; + + // When true, source map support will be disabled. + // All locations in profiles will reference locations in the running + // JavaScript. + disableSourceMaps?: boolean; +} + +// Interface for config after local initialization.
+export interface LocalConfig extends GoogleAuthOptions { + apiEndpoint: string; + projectId?: string; + logLevel: number; + serviceContext: {service: string; version?: string}; + instance?: string; + zone?: string; + disableTime: boolean; + disableHeap: boolean; + timeIntervalMicros: number; + heapIntervalBytes: number; + heapMaxStackDepth: number; + ignoreHeapSamplesPath: string; + initialBackoffMillis: number; + backoffCapMillis: number; + backoffMultiplier: number; + serverBackoffCapMillis: number; + localProfilingPeriodMillis: number; + localLogPeriodMillis: number; + localTimeDurationMillis: number; + sourceMapSearchPath: string[]; + disableSourceMaps: boolean; +} + +// Interface for an initialized profiler config. +export interface ProfilerConfig extends LocalConfig { + projectId: string; +} + +// Default values for configuration for a profiler. +export const defaultConfig = { + logLevel: 2, + serviceContext: {}, + disableHeap: false, + disableTime: false, + timeIntervalMicros: 1000, + heapIntervalBytes: 512 * 1024, + heapMaxStackDepth: 64, + ignoreHeapSamplesPath: '@google-cloud/profiler', + initialBackoffMillis: 60 * 1000, // 1 minute + backoffCapMillis: ms('1h')!, + backoffMultiplier: 1.3, + apiEndpoint: 'cloudprofiler.googleapis.com', + + // This is the largest duration for setTimeout which does not cause it to + // run immediately. + // https://nodejs.org/dist/latest-v9.x/docs/api/timers.html#timers_settimeout_callback_delay_args. + serverBackoffCapMillis: 2147483647, + + localProfilingPeriodMillis: 1000, + localLogPeriodMillis: 10000, + localTimeDurationMillis: 1000, + sourceMapSearchPath: [process.cwd()], + disableSourceMaps: false, +}; diff --git a/handwritten/cloud-profiler/src/index.ts b/handwritten/cloud-profiler/src/index.ts new file mode 100644 index 00000000000..b69ea0dca44 --- /dev/null +++ b/handwritten/cloud-profiler/src/index.ts @@ -0,0 +1,288 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import delay from 'delay'; +import * as extend from 'extend'; +import * as fs from 'fs'; +import * as gcpMetadata from 'gcp-metadata'; +import {heap as heapProfiler} from 'pprof'; +import * as semver from 'semver'; + +import {Config, defaultConfig, LocalConfig, ProfilerConfig} from './config'; +import {createLogger} from './logger'; +import {Profiler} from './profiler'; + +// eslint-disable-next-line @typescript-eslint/no-var-requires +const pjson = require('../../package.json'); +const serviceRegex = /^[a-z0-9]([-a-z0-9_.]{0,253}[a-z0-9])?$/; + +function hasService( + config: Config +): config is {serviceContext: {service: string}} { + return ( + config.serviceContext !== undefined && + typeof config.serviceContext.service === 'string' + ); +} + +function hasProjectId(config: Config): config is {projectId: string} { + return typeof config.projectId === 'string'; +} + +/** + * Sets unset values in the configuration to the value retrieved from + * environment variables or specified in defaultConfig. 
+ * Throws error if value that must be set cannot be initialized. + */ +function initConfigLocal(config: Config): LocalConfig { + const envConfig: Config = { + projectId: process.env.GCLOUD_PROJECT, + serviceContext: { + service: process.env.GAE_SERVICE || process.env.K_SERVICE, + version: process.env.GAE_VERSION || process.env.K_REVISION, + }, + }; + + if (process.env.GCLOUD_PROFILER_LOGLEVEL !== undefined) { + const envLogLevel = Number(process.env.GCLOUD_PROFILER_LOGLEVEL); + if (!isNaN(envLogLevel)) { + envConfig.logLevel = envLogLevel; + } + } + + let envSetConfig: Config = {}; + const configPath = process.env.GCLOUD_PROFILER_CONFIG; + if (configPath) { + let envSetConfigBuf; + try { + envSetConfigBuf = fs.readFileSync(configPath); + } catch (e) { + throw Error(`Could not read GCLOUD_PROFILER_CONFIG ${configPath}: ${e}`); + } + try { + envSetConfig = JSON.parse(envSetConfigBuf.toString()); + } catch (e) { + throw Error(`Could not parse GCLOUD_PROFILER_CONFIG ${configPath}: ${e}`); + } + } + + const mergedUserConfigs = extend(true, {}, envSetConfig, envConfig, config); + if ( + Array.isArray(mergedUserConfigs.sourceMapSearchPath) && + mergedUserConfigs.sourceMapSearchPath.length === 0 && + !mergedUserConfigs.disableSourceMaps + ) { + throw new Error( + 'serviceMapSearchPath is an empty array. Use disableSourceMaps to' + + ' disable source map support instead.' + ); + } + + const mergedConfig = extend(true, {}, defaultConfig, mergedUserConfigs); + + if (!hasService(mergedConfig)) { + throw new Error('Service must be specified in the configuration'); + } + + if (!serviceRegex.test(mergedConfig.serviceContext.service)) { + throw new Error( + `Service ${ + mergedConfig.serviceContext.service + } does not match regular expression "${serviceRegex.toString()}"` + ); + } + + return mergedConfig; +} + +/** + * Sets unset values in the configuration which can be retrieved from GCP + * metadata. + */ +async function initConfigMetadata( + config: LocalConfig +): Promise { + const logger = createLogger(config.logLevel); + const getMetadataProperty = async ( + f: (s: string) => Promise, + field: string + ) => { + try { + return await f(field); + } catch (e) { + logger.debug(`Failed to fetch ${field} from metadata: ${e}`); + } + return undefined; + }; + + if (!config.projectId || !config.zone || !config.instance) { + const [projectId, instance, zone] = await Promise.all([ + getMetadataProperty(gcpMetadata.project, 'project-id'), + getMetadataProperty(gcpMetadata.instance, 'name'), + getMetadataProperty(gcpMetadata.instance, 'zone'), + ]); + + if (!config.zone && zone) { + config.zone = zone.substring(zone.lastIndexOf('/') + 1); + } + if (!config.instance && instance) { + config.instance = instance; + } + if (!config.projectId && projectId) { + config.projectId = projectId; + } + } + + if (!hasProjectId(config)) { + throw new Error('Project ID must be specified in the configuration'); + } + + return config; +} + +/** + * Returns true if the version passed in satifised version requirements + * specified in the profiler's package.json. + * + * Exported for testing. + */ +export function nodeVersionOkay(version: string | semver.SemVer): boolean { + // Coerce version if possible, to remove any pre-release, alpha, beta, etc + // tags. + version = semver.coerce(version) || version; + return semver.satisfies(version, pjson.engines.node); +} + +/** + * Initializes the config, and starts heap profiler if the heap profiler is + * needed. Returns a profiler if creation is successful. 
Otherwise, returns + * rejected promise. + */ +export async function createProfiler(config: Config = {}): Promise { + if (!nodeVersionOkay(process.version)) { + throw new Error( + `Could not start profiler: node version ${process.version}` + + ` does not satisfies "${pjson.engines.node}"` + + '\nSee https://github.com/googleapis/cloud-profiler-nodejs#prerequisites' + + ' for details.' + ); + } + + const localConfig: LocalConfig = initConfigLocal(config); + + // Start the heap profiler if profiler config does not indicate heap profiling + // is disabled. This must be done before any asynchronous calls are made so + // all memory allocations made after start() is called can be captured. + if (!localConfig.disableHeap) { + heapProfiler.start( + localConfig.heapIntervalBytes, + localConfig.heapMaxStackDepth + ); + } + let profilerConfig: ProfilerConfig; + try { + profilerConfig = await initConfigMetadata(localConfig); + } catch (err) { + heapProfiler.stop(); + throw err; + } + return new Profiler(profilerConfig); +} + +/** + * Starts the profiling agent and returns a promise. + * If any error is encountered when configuring the profiler the promise will + * be rejected. Resolves when profiling is started. + * + * config - Config describing configuration for profiling. + * + * @example + * profiler.start(); + * + * @example + * profiler.start(config); + * + */ +export async function start(config: Config = {}): Promise { + const profiler = await createProfiler(config); + profiler.start(); +} + +/** + * For debugging purposes. Collects profiles and discards the collected + * profiles. + */ +export async function startLocal(config: Config = {}): Promise { + const profiler = await createProfiler(config); + + // Set up periodic logging. + const logger = createLogger(config.logLevel); + + let heapProfileCount = 0; + let timeProfileCount = 0; + let prevLogTime = Date.now(); + + setInterval(() => { + const curTime = Date.now(); + const {rss, heapTotal, heapUsed} = process.memoryUsage(); + const debugInfo = [ + new Date().toISOString(), + 'rss', + (rss / (1024 * 1024)).toFixed(3), + 'MiB,', + 'heap total', + (heapTotal / (1024 * 1024)).toFixed(3), + 'MiB,', + 'heap used', + (heapUsed / (1024 * 1024)).toFixed(3), + 'MiB,', + 'heap profile collection rate', + ((heapProfileCount * 1000) / (curTime - prevLogTime)).toFixed(3), + 'profiles/s,', + 'time profile collection rate', + ((timeProfileCount * 1000) / (curTime - prevLogTime)).toFixed(3), + 'profiles/s', + ].map(v => v + ''); + logger.debug(debugInfo.join(' ')); + + heapProfileCount = 0; + timeProfileCount = 0; + prevLogTime = curTime; + }, profiler.config.localLogPeriodMillis); + + // Periodic profiling + setInterval(async () => { + if (!config.disableHeap) { + await profiler.profile({ + name: 'Heap-Profile' + new Date(), + profileType: 'HEAP', + }); + heapProfileCount++; + } + await delay(profiler.config.localProfilingPeriodMillis / 2); + if (!config.disableTime) { + await profiler.profile({ + name: 'Time-Profile' + new Date(), + profileType: 'WALL', + duration: profiler.config.localTimeDurationMillis.toString() + 'ms', + }); + timeProfileCount++; + } + }, profiler.config.localProfilingPeriodMillis); +} + +// If the module was --require'd from the command line, start the agent. 
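The preload path below only works because everything start() needs can come from the environment: initConfigLocal() above reads GCLOUD_PROJECT, GAE_SERVICE/K_SERVICE, GAE_VERSION/K_REVISION, GCLOUD_PROFILER_LOGLEVEL, and an optional JSON file named by GCLOUD_PROFILER_CONFIG, with explicit start() options overriding environment variables, which in turn override the config file. A sketch of configuring the agent that way (the file path is a placeholder):

// Sketch only: the config file path is hypothetical; the file may contain any
// Config fields, e.g. {"serviceContext": {"service": "my-service"}}.
process.env.GCLOUD_PROFILER_LOGLEVEL = '4'; // 4 = debug
process.env.GCLOUD_PROFILER_CONFIG = '/etc/profiler-config.json';
require('@google-cloud/profiler')
  .start()
  .catch((err: Error) => console.error(`Could not start profiler: ${err}`));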
+if (module.parent && module.parent.id === 'internal/preload') { + start(); +} diff --git a/handwritten/cloud-profiler/src/logger.ts b/handwritten/cloud-profiler/src/logger.ts new file mode 100644 index 00000000000..e00f959b3bf --- /dev/null +++ b/handwritten/cloud-profiler/src/logger.ts @@ -0,0 +1,91 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* eslint-disable @typescript-eslint/no-explicit-any */ + +import {defaultConfig} from './config'; +import {LogSync, Logging} from '@google-cloud/logging-min'; + +const logging = new Logging(); + +// migrating from 'console-log-level' package we keep +// min and max log levels numeric interface used there +const [MIN_LEVEL, MAX_LEVEL] = [0, 4]; + +// eslint-disable-next-line @typescript-eslint/no-var-requires +const pjson = require('../../package.json'); + +export class Logger { + private log: LogSync; + private severityThreshold: number; + + constructor(readonly level?: number) { + if (level === undefined) { + level = defaultConfig.logLevel; + } + if (level < MIN_LEVEL) { + level = MIN_LEVEL; + } else if (level > MAX_LEVEL) { + level = MAX_LEVEL; + } + this.severityThreshold = level; + this.log = logging.logSync(pjson.name); + } + + debug(msg: string) { + if (this.severityThreshold > 3) { + this.log.debug(this.log.entry(this.toOneLine(msg))); + } + } + + info(msg: string) { + if (this.severityThreshold > 2) { + this.log.info(this.log.entry(this.toOneLine(msg))); + } + } + + warn(msg: string) { + if (this.severityThreshold > 1) { + this.log.warning(this.log.entry(this.toOneLine(msg))); + } + } + + error(msg: string) { + if (this.severityThreshold > 0) { + this.log.error(this.log.entry(this.toOneLine(msg))); + } + } + + private toOneLine(msg: string): string { + const temp = msg.replace('\r\n', '\\r\\n'); + return temp.replace('\n', '\\n'); + } +} + +let didLoggingInit = false; + +export function createLogger(level?: number): Logger { + if (!didLoggingInit) { + logging.setProjectId().catch(err => { + console.error(`failed to set logging project id ${err}`); + }); + logging.setDetectedResource().catch(err => { + console.error(`failed to discover resource metadata ${err}`); + }); + + didLoggingInit = true; + } + + return new Logger(level); +} diff --git a/handwritten/cloud-profiler/src/profiler.ts b/handwritten/cloud-profiler/src/profiler.ts new file mode 100644 index 00000000000..12dccc68ece --- /dev/null +++ b/handwritten/cloud-profiler/src/profiler.ts @@ -0,0 +1,570 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import { + Service, + ServiceConfig, + ServiceObject, + ApiError, + DecorateRequestOptions, +} from '@google-cloud/common'; +import {heap as heapProfiler, SourceMapper, time as timeProfiler} from 'pprof'; +import {perftools} from 'pprof/proto/profile'; +import * as msToStr from 'pretty-ms'; +import {promisify} from 'util'; +import * as zlib from 'zlib'; +import * as r from 'teeny-request'; + +import {ProfilerConfig} from './config'; +import {createLogger} from './logger'; + +import * as ms from 'ms'; +// eslint-disable-next-line @typescript-eslint/no-var-requires +const pjson = require('../../package.json'); +const SCOPE = 'https://www.googleapis.com/auth/monitoring.write'; +const gzip = promisify(zlib.gzip); + +enum ProfileTypes { + Wall = 'WALL', + Heap = 'HEAP', +} + +/** + * @return true iff http status code indicates an error. + */ +function isErrorResponseStatusCode(code: number) { + return code < 200 || code >= 300; +} + +/** + * Interface for deployment field of RequestProfile. Profiles with matching + * deployments will be grouped together. + * Used as body of request when creating profile using the profiler API. + * + * Public for testing. + */ +export interface Deployment { + projectId?: string; + target?: string; + labels?: {zone?: string; version?: string; language: string}; +} + +/** + * Interface for body of response from profiler API when creating + * profile and used as body of request to profiler API when + * uploading a profile. + * + * Public for testing. + */ +export interface RequestProfile { + name: string; + profileType?: string; + duration?: string; + profileBytes?: string; + deployment?: Deployment; + labels?: {instance?: string}; +} + +/** + * @return the error's message, if present. Otherwise returns the + * message of the response body, if that field exists, or the response status + * message. + */ +function getResponseErrorMessage( + // eslint-disable-next-line @typescript-eslint/no-explicit-any + response: r.Response, + err: Error | null +): string | undefined { + if (err && err.message) { + return err.message; + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const body = (response as any).body; + if (body && body.message && typeof body.message === 'string') { + return body.message; + } + return response.statusMessage; +} + +/** + * @return number indicated by backoff if the response indicates a backoff and + * that backoff is greater than 0. Otherwise returns undefined. + */ +function getServerResponseBackoff(body: object): number | undefined { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const b = body as any; + if (b.error && b.error.details && Array.isArray(b.error.details)) { + for (const item of b.error.details) { + if ( + typeof item === 'object' && + item.retryDelay && + typeof item.retryDelay === 'string' + ) { + // item is a RetryInfo + // https://github.com/googleapis/googleapis/blob/4ec607bd375cddbec6d28bc1931eab7da221e4bb/google/rpc/error_details.proto#L92 + // and the ProtoJSON encoding of the duration will be a string of seconds with "s" + // suffix https://protobuf.dev/programming-guides/json + const retryDelay: `${number}s` = item.retryDelay; + const backoffMillis = ms(retryDelay); + if (backoffMillis > 0) { + return backoffMillis; + } + } + } + } + return undefined; +} + +/** + * @return true if an deployment is a Deployment and false otherwise. 
+ */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function isDeployment(deployment: any): deployment is Deployment { + return ( + (deployment.projectId === undefined || + typeof deployment.projectId === 'string') && + (deployment.target === undefined || + typeof deployment.target === 'string') && + deployment.labels !== undefined && + deployment.labels.language !== undefined && + typeof deployment.labels.language === 'string' + ); +} + +/** + * @return true if an prof is a RequestProfile and false otherwise. + */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function isRequestProfile(prof: any): prof is RequestProfile { + return ( + prof && + typeof prof.name === 'string' && + typeof prof.profileType === 'string' && + (prof.duration === undefined || typeof prof.duration === 'string') && + (prof.labels === undefined || + prof.labels.instance === undefined || + typeof prof.labels.instance === 'string') && + (prof.deployment === undefined || isDeployment(prof.deployment)) + ); +} + +/** + * Converts a profile to a compressed, base64 encoded string. + * + * Work for converting profile is done on the event loop. In particular, + * profile encoding is done on the event loop. So, this does block execution + * of the program, but for a short period of time, since profiles are small. + * + * @param p - profile to be converted to string. + */ +async function profileBytes(p: perftools.profiles.IProfile): Promise { + const buffer = perftools.profiles.Profile.encode(p).finish(); + const gzBuf = (await gzip(buffer)) as Buffer; + return gzBuf.toString('base64'); +} + +/** + * Error constructed from HTTP server response which indicates backoff. + */ +export class BackoffResponseError extends Error { + constructor( + message: string | undefined, + readonly backoffMillis: number + ) { + super(message); + } +} + +/** + * @return true if error is a BackoffResponseError and false otherwise + */ +function isBackoffResponseError(err: Error): err is BackoffResponseError { + return typeof (err as BackoffResponseError).backoffMillis === 'number'; +} + +/** + * Class which tracks how long to wait before the next retry and can be + * used to get this backoff. + */ +export class Retryer { + private nextBackoffMillis: number; + + // For testing. Allows Math.random() to be replaced with non-random function. + private random: () => number; + + constructor( + readonly initialBackoffMillis: number, + readonly backoffCapMillis: number, + readonly backoffMultiplier: number, + random = Math.random + ) { + this.nextBackoffMillis = this.initialBackoffMillis; + this.random = random; + } + getBackoff(): number { + const curBackoff = this.random() * this.nextBackoffMillis; + this.nextBackoffMillis = Math.min( + this.backoffMultiplier * this.nextBackoffMillis, + this.backoffCapMillis + ); + return curBackoff; + } + reset() { + this.nextBackoffMillis = this.initialBackoffMillis; + } +} + +/** + * @return profile iff response indicates success and the returned profile was + * valid. + * @throws error when the response indicated failure or the returned profile + * was not valid. + */ +function responseToProfileOrError( + err: Error | null, + body?: object, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + response?: r.Response +): RequestProfile { + // response.statusCode is guaranteed to exist on client requests. 
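  // For reference, a sketch of one error shape handled here: a response whose
  // body carries a google.rpc.RetryInfo detail, e.g.
  //   {"error": {"message": "...", "details": [{"retryDelay": "300s"}]}}
  // (the "300s" value is illustrative), is turned by getServerResponseBackoff()
  // into 300 * 1000 ms and surfaced as a BackoffResponseError. collectProfile()
  // then waits that long (capped at serverBackoffCapMillis) before the next
  // createProfile() attempt; any other failure instead falls back to the
  // Retryer's growing random backoff envelope.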
+ if (response && isErrorResponseStatusCode(response.statusCode!)) { + const message = getResponseErrorMessage(response, err); + if (body) { + const delayMillis = getServerResponseBackoff(body); + if (delayMillis) { + throw new BackoffResponseError(message, delayMillis); + } + } + throw new Error(message); + } + if (err) { + throw err; + } + if (isRequestProfile(body)) { + return body; + } + throw new Error(`Profile not valid: ${JSON.stringify(body)}.`); +} + +/** + * Polls profiler server for instructions on behalf of a task and + * collects and uploads profiles as requested. + * + * If heap profiling is enabled, the heap profiler must be enabled before heap + * profiles can be collected. + */ +export class Profiler extends ServiceObject { + private logger: ReturnType; + private profileLabels: {instance?: string}; + private deployment: Deployment; + private profileTypes: string[]; + private retryer: Retryer; + private sourceMapper: SourceMapper | undefined; + private baseApiUrl: string; + + // Public for testing. + config: ProfilerConfig; + + constructor(config: ProfilerConfig) { + config = config || ({} as ProfilerConfig); + const baseApiUrl = `https://${config.apiEndpoint}/v2`; + const serviceConfig: ServiceConfig = { + apiEndpoint: config.apiEndpoint, + baseUrl: baseApiUrl, + scopes: [SCOPE], + packageJson: pjson, + }; + super({ + parent: new Service(serviceConfig, config), + baseUrl: '/', + }); + this.config = config; + this.baseApiUrl = baseApiUrl; + + this.logger = createLogger(this.config.logLevel); + + const labels: {zone?: string; version?: string; language: string} = { + language: 'nodejs', + }; + if (this.config.zone) { + labels.zone = this.config.zone; + } + if (this.config.serviceContext.version) { + labels.version = this.config.serviceContext.version; + } + this.deployment = { + projectId: this.config.projectId, + target: this.config.serviceContext.service, + labels, + }; + + this.profileLabels = {}; + if (this.config.instance) { + this.profileLabels.instance = this.config.instance; + } + + this.profileTypes = []; + if (!this.config.disableTime) { + this.profileTypes.push(ProfileTypes.Wall); + } + if (!this.config.disableHeap) { + this.profileTypes.push(ProfileTypes.Heap); + } + this.retryer = new Retryer( + this.config.initialBackoffMillis, + this.config.backoffCapMillis, + this.config.backoffMultiplier + ); + } + + /** + * Starts an endless loop to poll profiler server for instructions, and + * collects and uploads profiles as requested. + * If there is a problem when collecting a profile or uploading a profile to + * profiler server, this problem will be logged at the error level and + * otherwise ignored. + * If there is a problem polling profiler server for instructions + * on the type of profile to be collected, this problem will be logged at the + * error level and getting profile type will be retried. + */ + async start(): Promise { + if (!this.config.disableSourceMaps) { + try { + this.sourceMapper = await SourceMapper.create( + this.config.sourceMapSearchPath + ); + } catch (err) { + this.logger.error( + `Failed to initialize SourceMapper. Source map support has been disabled: ${err}` + ); + this.config.disableSourceMaps = true; + } + } + this.logger.debug(`Cloud Profiler Node.js agent version: ${pjson.version}`); + this.runLoop(); + } + + /** + * Endlessly polls the profiler server for instructions, and collects and + * uploads profiles as requested. 
+ */ + async runLoop() { + const delayMillis = await this.collectProfile(); + setTimeout(this.runLoop.bind(this), delayMillis).unref(); + } + + /** + * Waits for profiler server to tell it to collect a profile, then collects + * a profile and uploads it. + * + * @return time, in ms, to wait before asking profiler server again about + * collecting another profile. + */ + async collectProfile(): Promise { + let prof: RequestProfile; + try { + prof = await this.createProfile(); + } catch (err) { + if (isBackoffResponseError(err as BackoffResponseError)) { + this.logger.debug( + `Must wait ${msToStr( + (err as BackoffResponseError).backoffMillis + )} to create profile: ${err}` + ); + return Math.min( + (err as BackoffResponseError).backoffMillis, + this.config.serverBackoffCapMillis + ); + } + const backoff = this.retryer.getBackoff(); + this.logger.warn( + `Failed to create profile, waiting ${msToStr( + backoff + )} to try again: ${err}` + ); + return backoff; + } + this.retryer.reset(); + await this.profileAndUpload(prof); + return 0; + } + + /** + * Talks to profiler server, which hangs until server indicates + * job should be profiled and then indicates what type of profile should + * be collected. + * + * If any problem is encountered, an error will be thrown. + * + * @return a RequestProfile specifying which type of profile should be + * collected and other information needed to collect and upload a profile of + * the specified type. + * + * TODO (issue #28): right now, this call could hang for up to an hour when + * this method is the only thing on the event loop, keeping the program open + * even when all work is done. Should expose the ability to cancel the http + * request made here, and then determine when to cancel this request. + * + * Public to allow for testing. + */ + async createProfile(): Promise { + const reqBody = { + deployment: this.deployment, + profileType: this.profileTypes, + }; + const options: DecorateRequestOptions = { + method: 'POST', + uri: '/profiles', + body: reqBody, + json: true, + maxRetries: 0, + + // Default timeout for for a request is 1 minute, but request to create + // profile is designed to hang until it is time to collect a profile + // (up to one hour). + timeout: ms('1h')!, + }; + + this.logger.debug('Attempting to create profile.'); + return new Promise((resolve, reject) => { + this.request( + options, + ( + err: Error | ApiError | null, + body?: object, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + response?: r.Response + ) => { + try { + const prof = responseToProfileOrError(err, body, response); + this.logger.debug( + `Successfully created profile ${prof.profileType}.` + ); + resolve(prof); + } catch (err) { + reject(err); + } + } + ); + }); + } + + /** + * Collects a profile of the type specified by the profileType field of prof. + * If any problem is encountered, like a problem collecting or uploading the + * profile, a message will be logged, and the error will otherwise be ignored. + * + * Public to allow for testing. 
+ */ + async profileAndUpload(prof: RequestProfile): Promise { + try { + prof = await this.profile(prof); + this.logger.debug(`Successfully collected profile ${prof.profileType}.`); + prof.labels = this.profileLabels; + } catch (err) { + this.logger.debug(`Failed to collect profile: ${err}`); + return; + } + const options = { + method: 'PATCH', + uri: this.baseApiUrl + '/' + prof.name, + body: prof, + json: true, + maxRetries: 0, + }; + + try { + const [, res] = await this.request(options); + if (isErrorResponseStatusCode(res.statusCode)) { + let message: number | string = res.statusCode; + if (res.statusMessage) { + message = res.statusMessage; + } + this.logger.debug(`Could not upload profile: ${message}.`); + return; + } + this.logger.debug(`Successfully uploaded profile ${prof.profileType}.`); + } catch (err) { + this.logger.debug(`Failed to upload profile: ${err}`); + } + } + + /** + * Collects a profile of the type specified by profileType field of prof. + * If any problem is encountered, for example the profileType is not + * recognized or profiling is disabled for the specified profileType, an + * error will be thrown. + * + * Public to allow for testing. + */ + async profile(prof: RequestProfile): Promise { + switch (prof.profileType) { + case ProfileTypes.Wall: + return this.writeTimeProfile(prof); + case ProfileTypes.Heap: + return this.writeHeapProfile(prof); + default: + throw new Error(`Unexpected profile type ${prof.profileType}.`); + } + } + + /** + * Collects a time profile, converts profile to compressed, base64 encoded + * string, and puts this string in profileBytes field of prof. + * + * Public to allow for testing. + */ + async writeTimeProfile(prof: RequestProfile): Promise { + if (this.config.disableTime) { + throw Error('Cannot collect time profile, time profiler not enabled.'); + } + if (prof.duration === undefined) { + throw Error('Cannot collect time profile, duration is undefined.'); + } + const durationMillis = ms(prof.duration as ms.StringValue); + if (!durationMillis) { + throw Error( + `Cannot collect time profile, duration "${prof.duration}" cannot` + + ' be parsed.' + ); + } + const options = { + durationMillis, + intervalMicros: this.config.timeIntervalMicros, + sourceMapper: this.sourceMapper, + }; + + const p = await timeProfiler.profile(options); + prof.profileBytes = await profileBytes(p); + return prof; + } + + /** + * Collects a heap profile, converts profile to compressed, base64 encoded + * string, and adds profileBytes field to prof with this string. + * + * Public to allow for testing. + */ + async writeHeapProfile(prof: RequestProfile): Promise { + if (this.config.disableHeap) { + throw Error('Cannot collect heap profile, heap profiler not enabled.'); + } + const p = heapProfiler.profile( + this.config.ignoreHeapSamplesPath, + this.sourceMapper + ); + prof.profileBytes = await profileBytes(p); + return prof; + } +} diff --git a/handwritten/cloud-profiler/src/v8-types.ts b/handwritten/cloud-profiler/src/v8-types.ts new file mode 100644 index 00000000000..1352e573317 --- /dev/null +++ b/handwritten/cloud-profiler/src/v8-types.ts @@ -0,0 +1,46 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Type Definitions based on implementation in bindings/ + +export interface TimeProfile { + /** Time in nanoseconds at which profile was stopped. */ + endTime: number; + topDownRoot: TimeProfileNode; + /** Time in nanoseconds at which profile was started. */ + startTime: number; +} + +export interface ProfileNode { + // name is the function name. + name?: string; + scriptName: string; + scriptId?: number; + lineNumber?: number; + columnNumber?: number; + children: ProfileNode[]; +} + +export interface TimeProfileNode extends ProfileNode { + hitCount: number; +} + +export interface AllocationProfileNode extends ProfileNode { + allocations: Allocation[]; +} + +export interface Allocation { + sizeBytes: number; + count: number; +} diff --git a/handwritten/cloud-profiler/system-test/README.md b/handwritten/cloud-profiler/system-test/README.md new file mode 100644 index 00000000000..6cb3c68304f --- /dev/null +++ b/handwritten/cloud-profiler/system-test/README.md @@ -0,0 +1,18 @@ +This directory contains an integration test that confirms the basic +functionality of the profiler works on Compute Engine. In particular, this test +confirms that the agent can create and upload profiles from a Compute Engine +VM, and that these profiles contain symbolized samples from the benchmark +application. + +More specifically, this test: +1. Starts 3 Compute Engine VMs, one to test Node.js versions 6, 8, and 9. + Each Compute Engine VM then: + 1. Downloads the desired version of Node.js, github, and build-essentials + (the dependencies needed to run the test). + 2. Clones the agent source code at the revision of interest. + 3. Runs the benchmark application, busybench.js (which repeatedly calls + a function which creates and fills a buffer) with the agent attached. +2. Waits for the application in each Compute Engine VM to finish. +3. Queries the Cloud Profiler API to confirm that both heap and wall profiles + have been uploaded to the API and that the profiles contain symbolized + samples which include the name of the function in the benchmark. 
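For orientation, a distilled sketch of the kind of workload the README describes (the real benchmark, busybench.ts, appears below in this diff): start the agent, then run a loop that both burns CPU, so its frames show up in WALL profiles, and keeps allocating, so they show up in HEAP profiles. The service name is a placeholder and this does not replace the actual benchmark.

import * as profiler from '@google-cloud/profiler';

async function main() {
  // Placeholder service name; the real test derives it from the VM name.
  await profiler.start({serviceContext: {service: 'busybench-sketch'}});
  const chunks: number[][] = [];
  for (;;) {
    // ~1 MiB per chunk: 128 * 1024 numbers at 8 bytes each.
    chunks.push(new Array<number>(128 * 1024).fill(Math.random()));
    if (chunks.length > 64) {
      chunks.shift(); // keep total memory roughly bounded
    }
    // Yield to the event loop so the agent can collect and upload profiles.
    await new Promise(resolve => setTimeout(resolve, 5));
  }
}

void main();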
diff --git a/handwritten/cloud-profiler/system-test/busybench/package.json b/handwritten/cloud-profiler/system-test/busybench/package.json new file mode 100644 index 00000000000..b5db9cff7df --- /dev/null +++ b/handwritten/cloud-profiler/system-test/busybench/package.json @@ -0,0 +1,29 @@ +{ + "name": "system-test", + "version": "0.0.0", + "description": "", + "main": "build/src/index.js", + "types": "build/src/index.d.ts", + "files": [ + "build/src" + ], + "engines": { + "node": ">=14.0.0" + }, + "license": "Apache-2.0", + "keywords": [], + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "check": "gts check", + "clean": "gts clean", + "compile": "tsc -p .", + "fix": "gts fix", + "prepare": "npm run compile", + "pretest": "npm run compile", + "posttest": "npm run check" + }, + "devDependencies": { + "gts": "^3.1.0", + "typescript": "4.9.5" + } +} diff --git a/handwritten/cloud-profiler/system-test/busybench/src/busybench.ts b/handwritten/cloud-profiler/system-test/busybench/src/busybench.ts new file mode 100644 index 00000000000..b4bfb9eaf6d --- /dev/null +++ b/handwritten/cloud-profiler/system-test/busybench/src/busybench.ts @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +import * as profiler from '@google-cloud/profiler'; +profiler.start({ + logLevel: 5, +}); + +const startTime: number = Date.now(); +const testArr: Array> = []; + +/** + * Fills several arrays, then calls itself with setImmediate. + * It continues to do this until durationSeconds after the startTime. + */ +function busyLoop(durationSeconds: number) { + for (let i = 0; i < testArr.length; i++) { + for (let j = 0; j < testArr[i].length; j++) { + testArr[i][j] = Math.sqrt(j * testArr[i][j]); + } + } + if (Date.now() - startTime < 1000 * durationSeconds) { + setTimeout(() => busyLoop(durationSeconds), 5); + } +} + +function benchmark(durationSeconds: number) { + // Allocate arrMiB MiB in 1 MiB chunks. + for (let i = 0; i < arrMiB; i++) { + // 8 bytes per number * 128 * 1024 = 1 MiB + testArr[i] = new Array(128 * 1024); + } + busyLoop(durationSeconds); +} + +const durationSeconds = Number(process.argv.length > 2 ? process.argv[2] : 600); +const arrMiB = Number(process.argv.length > 3 ? 
process.argv[3] : 128); +setTimeout(() => benchmark(durationSeconds), 1000); diff --git a/handwritten/cloud-profiler/system-test/busybench/tsconfig.json b/handwritten/cloud-profiler/system-test/busybench/tsconfig.json new file mode 100644 index 00000000000..d7e41e89e28 --- /dev/null +++ b/handwritten/cloud-profiler/system-test/busybench/tsconfig.json @@ -0,0 +1,13 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "lib": [ "es2015" ], + "target": "es2015", + "skipLibCheck": true + }, + "include": [ + "src/*.ts" + ] +} diff --git a/handwritten/cloud-profiler/system-test/integration_test.go b/handwritten/cloud-profiler/system-test/integration_test.go new file mode 100644 index 00000000000..08601e369ce --- /dev/null +++ b/handwritten/cloud-profiler/system-test/integration_test.go @@ -0,0 +1,395 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build integration && go1.7 + +package e2e + +import ( + "bytes" + "flag" + "fmt" + "os" + "runtime" + "strings" + "testing" + "text/template" + "time" + + "cloud.google.com/go/profiler/proftest" + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + repo = flag.String("repo", "https://github.com/googleapis/cloud-profiler-nodejs.git", "git repo to test") + branch = flag.String("branch", "", "git branch to test") + commit = flag.String("commit", "", "git commit to test") + pr = flag.Int("pr", 0, "git pull request to test") + runBackoffTest = flag.Bool("run_backoff_test", false, "Enables the backoff integration test. This integration test requires over 45 mins to run, so it is not run by default.") + + runID = strings.ToLower(strings.Replace(time.Now().Format("2006-01-02-15-04-05.000000-MST"), ".", "-", -1)) + benchFinishString = "benchmark application(s) complete" + errorString = "failed to set up or run the benchmark" +) + +const ( + cloudScope = "https://www.googleapis.com/auth/cloud-platform" + gceBenchDuration = 600 * time.Second + gceTestTimeout = 25 * time.Minute + + // For any agents to receive backoff, there must be more than 32 agents in + // the deployment. The initial backoff received will be 33 minutes; each + // subsequent backoff will be one minute longer. Running 45 benchmarks for + // 45 minutes will ensure that several agents receive backoff responses and + // are able to wait for the backoff duration then send another request. + numBackoffBenchmarks = 45 + backoffBenchDuration = 45 * time.Minute + backoffTestTimeout = 60 * time.Minute +) + +const startupTemplate = ` +{{ define "setup"}} + +npm_install() { + npm cache clean --force # Avoid persistent errors on rare cache corruptions. 
+ timeout 60 npm install --quiet --no-color --no-progress "${@}" +} + +# Install git +retry apt-get update >/dev/null +retry apt-get -y -q install git build-essential >/dev/null + +# Install desired version of Node.js +retry curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash >/dev/null +export NVM_DIR="$HOME/.nvm" >/dev/null +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" >/dev/null + +# nvm install writes to stderr and stdout on successful install, so both are +# redirected to serial port 3. +retry nvm install {{.NodeVersion}} &>/dev/ttyS2 +npm -v +node -v +NODEDIR=$(dirname $(dirname $(which node))) + +# Install agent +git_clone_repo() { + rm -rf cloud-profiler-nodejs && git clone {{.Repo}} +} +retry git_clone_repo +cd cloud-profiler-nodejs +retry git fetch origin {{if .PR}}pull/{{.PR}}/head{{else}}{{.Branch}}{{end}}:pull_branch +git checkout pull_branch +git reset --hard {{.Commit}} + +retry npm_install --nodedir="$NODEDIR" + +npm run compile +npm pack --nodedir="$NODEDIR" >/dev/null +VERSION=$(node -e "console.log(require('./package.json').version);") +PROFILER="$HOME/cloud-profiler-nodejs/google-cloud-profiler-$VERSION.tgz" + +TESTDIR="$HOME/test" +mkdir -p "$TESTDIR" +cp -r "system-test/busybench" "$TESTDIR" +cd "$TESTDIR/busybench" + +retry npm_install @mapbox/node-pre-gyp --save +retry npm_install --nodedir="$NODEDIR" "$PROFILER" typescript gts + +npm run compile + +# Workaround to reduce flakiness connecting to the metadata server. +export DETECT_GCP_RETRIES=5 +{{- end }} + +{{ define "integration" -}} +{{- template "prologue" . }} +{{- template "setup" . }} +# Run benchmark with agent +GCLOUD_PROFILER_LOGLEVEL=5 GAE_SERVICE={{.Service}} node --trace-warnings build/src/busybench.js {{.DurationSec}} + +# Indicate to test that script has finished running +echo "{{.FinishString}}" + +{{ template "epilogue" . -}} +{{end}} + +{{ define "integration_backoff" -}} +{{- template "prologue" . }} +{{- template "setup" . }} + +# Do not display commands being run to simplify logging output. +set +x + +# Run benchmarks with agent. +echo "Starting {{.NumBackoffBenchmarks}} benchmarks." +for (( i = 0; i < {{.NumBackoffBenchmarks}}; i++ )); do + # A Node.js application will not exit while a CreateProfile request is + # inflight, so timeout is used to force the application to terminate. + (timeout {{.DurationSec}} sh -c \ + 'GCLOUD_PROFILER_LOGLEVEL=5 GAE_SERVICE={{.Service}} node --trace-warnings build/src/busybench.js {{.DurationSec}} 1' + ) |& while read line; do echo "benchmark $i: ${line}"; done || [ "$?" -eq "124" ] & +done +echo "Successfully started {{.NumBackoffBenchmarks}} benchmarks." + +wait + +# Continue displaying commands being run. +set -x + +echo "{{.FinishString}}" + +{{ template "epilogue" . -}} +{{ end }} + +` + +type profileSummary struct { + profileType string + functionName string + sourceFile string +} + +type nodeGCETestCase struct { + proftest.InstanceConfig + name string + nodeVersion string + benchDuration time.Duration + timeout time.Duration + + backoffTest bool + + // wantProfileTypes will not be used when the test is a backoff integration + // test. 
+ wantProfiles []profileSummary +} + +func (tc *nodeGCETestCase) initializeStartUpScript(template *template.Template) error { + params := struct { + Service string + NodeVersion string + Repo string + PR int + Branch string + Commit string + FinishString string + ErrorString string + DurationSec int + NumBackoffBenchmarks int + }{ + Service: tc.name, + NodeVersion: tc.nodeVersion, + Repo: *repo, + PR: *pr, + Branch: *branch, + Commit: *commit, + FinishString: benchFinishString, + ErrorString: errorString, + DurationSec: int(tc.benchDuration.Seconds()), + } + + testTemplate := "integration" + if tc.backoffTest { + testTemplate = "integration_backoff" + params.NumBackoffBenchmarks = numBackoffBenchmarks + } + + var buf bytes.Buffer + err := template.Lookup(testTemplate).Execute(&buf, params) + if err != nil { + return fmt.Errorf("failed to render startup script for %s: %v", tc.name, err) + } + tc.StartupScript = buf.String() + return nil +} + +func TestAgentIntegration(t *testing.T) { + projectID := os.Getenv("GCLOUD_TESTS_NODEJS_PROJECT_ID") + if projectID == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_NODEJS_PROJECT_ID) got empty string") + } + + zone := os.Getenv("GCLOUD_TESTS_NODEJS_ZONE") + if zone == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_NODEJS_ZONE) got empty string") + } + + if *commit == "" { + t.Fatal("commit flag is not set") + } + + ctx := context.Background() + + client, err := google.DefaultClient(ctx, cloudScope) + if err != nil { + t.Fatalf("failed to get default client: %v", err) + } + + computeService, err := compute.New(client) + if err != nil { + t.Fatalf("failed to initialize compute Service: %v", err) + } + + template, err := proftest.BaseStartupTmpl.Parse(startupTemplate) + if err != nil { + t.Fatalf("failed to parse startup script template: %v", err) + } + + gceTr := proftest.GCETestRunner{ + TestRunner: proftest.TestRunner{ + Client: client, + }, + ComputeService: computeService, + } + + wantProfiles := []profileSummary{ + {"WALL", "busyLoop", "busybench.ts"}, + {"HEAP", "benchmark", "busybench.ts"}, + } + + testcases := []nodeGCETestCase{ + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-node14-%s", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-node14-%s-gce", runID), + wantProfiles: wantProfiles, + nodeVersion: "14", + timeout: gceTestTimeout, + benchDuration: gceBenchDuration, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-node16-%s", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-node16-%s-gce", runID), + wantProfiles: wantProfiles, + nodeVersion: "16", + timeout: gceTestTimeout, + benchDuration: gceBenchDuration, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-node18-%s", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-node18-%s-gce", runID), + wantProfiles: wantProfiles, + nodeVersion: "18", + timeout: gceTestTimeout, + benchDuration: gceBenchDuration, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-node20-%s", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-node20-%s-gce", runID), + wantProfiles: wantProfiles, + nodeVersion: "20", + timeout: gceTestTimeout, + benchDuration: gceBenchDuration, + }, + } + + if *runBackoffTest { + testcases = 
append(testcases, + nodeGCETestCase{ + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-backoff-test-node14-%s", runID), + + // Running many copies of the benchmark requires more + // memory than is available on an n1-standard-1. Use a + // machine type with more memory for backoff test. + MachineType: "n1-highmem-2", + }, + name: fmt.Sprintf("profiler-backoff-test-node14-%s", runID), + backoffTest: true, + nodeVersion: "14", + timeout: backoffTestTimeout, + benchDuration: backoffBenchDuration, + }) + } + + // Allow test cases to run in parallel. + runtime.GOMAXPROCS(len(testcases)) + + for _, tc := range testcases { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + if err := tc.initializeStartUpScript(template); err != nil { + t.Fatalf("failed to initialize startup script: %v", err) + } + + err := gceTr.StartInstance(ctx, &tc.InstanceConfig) + if err != nil { + t.Fatalf("failed to start GCE instance: %v", err) + } + defer func() { + if gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil { + t.Fatal(err) + } + }() + + timeoutCtx, cancel := context.WithTimeout(ctx, tc.timeout) + defer cancel() + output, err := gceTr.PollAndLogSerialPort(timeoutCtx, &tc.InstanceConfig, benchFinishString, errorString, t.Logf) + if err != nil { + t.Fatal(err) + } + + if tc.backoffTest { + if err := proftest.CheckSerialOutputForBackoffs(output, numBackoffBenchmarks, "action throttled, backoff", "Attempting to create profile", "benchmark"); err != nil { + t.Errorf("failed to check serial output for backoffs: %v", err) + } + return + } + + timeNow := time.Now() + endTime := timeNow.Format(time.RFC3339) + startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339) + for _, wantProfile := range tc.wantProfiles { + pr, err := gceTr.TestRunner.QueryProfilesWithZone(tc.ProjectID, tc.name, startTime, endTime, wantProfile.profileType, tc.Zone) + if err != nil { + t.Errorf("QueryProfiles(%s, %s, %s, %s, %s) got error: %v", tc.ProjectID, tc.name, startTime, endTime, wantProfile.profileType, err) + continue + } + if wantProfile.sourceFile != "" { + if err := pr.HasFunctionInFile(wantProfile.functionName, wantProfile.sourceFile); err != nil { + t.Errorf("Function %s not found in source file %s in profiles of type %s: %v", wantProfile.functionName, wantProfile.sourceFile, wantProfile.profileType, err) + } + continue + } + if err := pr.HasFunction(wantProfile.functionName); err != nil { + t.Errorf("Function %s not found in profiles of type %s: %v", wantProfile.functionName, wantProfile.profileType, err) + } + } + }) + } +} diff --git a/handwritten/cloud-profiler/system-test/integration_test.sh b/handwritten/cloud-profiler/system-test/integration_test.sh new file mode 100755 index 00000000000..9596c9f560b --- /dev/null +++ b/handwritten/cloud-profiler/system-test/integration_test.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +retry() { + for i in {1..3}; do + [ $i == 1 ] || sleep 10 # Backing off after a failed attempt. + "${@}" && return 0 + done + return 1 +} + +# Fail on any error. +set -eo pipefail + +# Display commands being run. +set -x + +cd $(dirname $0)/.. 
+ +git config --global --add safe.directory /tmpfs/src/github/cloud-profiler-nodejs + +SERVICE_KEY="${KOKORO_KEYSTORE_DIR}/72935_cloud-profiler-e2e-service-account-key" +COMMIT=$(git rev-parse HEAD) +BRANCH=$(git rev-parse --abbrev-ref HEAD) +REPO=$(git config --get remote.origin.url) +export GCLOUD_TESTS_NODEJS_PROJECT_ID="cloud-profiler-e2e" +export GCLOUD_TESTS_NODEJS_ZONE="us-east4-b" +export GOOGLE_APPLICATION_CREDENTIALS="${SERVICE_KEY}" + +# Run test. +cd "system-test" + +# Pull in newer version of Go than provided by Kokoro image +go version +GO_VERSION="1.22.4" +retry curl -LO https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz +sudo rm -rf /usr/local/go && tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz +export PATH=$PATH:/usr/local/go/bin +go version + +# Initializing go modules allows our dependencies to install versions of their +# dependencies specified by their go.mod files. This reduces the likelihood of +# dependencies breaking this test. +go mod init e2e +retry go get cloud.google.com/go/profiler/proftest@main +retry go test -c -tags=integration . + +if [ "$KOKORO_GITHUB_PULL_REQUEST_NUMBER" = "" ]; then + ./e2e.test -commit="$COMMIT" -branch="$BRANCH" -repo="$REPO" -run_backoff_test=true +else + ./e2e.test -commit="$COMMIT" -pr="$KOKORO_GITHUB_PULL_REQUEST_NUMBER" +fi diff --git a/handwritten/cloud-profiler/system-test/kokoro/common.cfg b/handwritten/cloud-profiler/system-test/kokoro/common.cfg new file mode 100644 index 00000000000..f82020fb58d --- /dev/null +++ b/handwritten/cloud-profiler/system-test/kokoro/common.cfg @@ -0,0 +1,22 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 72935 + keyname: "cloud-profiler-e2e-service-account-key" + } + } +} diff --git a/handwritten/cloud-profiler/system-test/kokoro/continuous.cfg b/handwritten/cloud-profiler/system-test/kokoro/continuous.cfg new file mode 100644 index 00000000000..e036ac8920d --- /dev/null +++ b/handwritten/cloud-profiler/system-test/kokoro/continuous.cfg @@ -0,0 +1,16 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Location of the build script in this repository. 
+build_file: "cloud-profiler-nodejs/system-test/integration_test.sh" diff --git a/handwritten/cloud-profiler/system-test/kokoro/presubmit.cfg b/handwritten/cloud-profiler/system-test/kokoro/presubmit.cfg new file mode 100644 index 00000000000..e036ac8920d --- /dev/null +++ b/handwritten/cloud-profiler/system-test/kokoro/presubmit.cfg @@ -0,0 +1,16 @@ +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Location of the build script in this repository. +build_file: "cloud-profiler-nodejs/system-test/integration_test.sh" diff --git a/handwritten/cloud-profiler/system-test/test-start.ts b/handwritten/cloud-profiler/system-test/test-start.ts new file mode 100644 index 00000000000..d94013778f6 --- /dev/null +++ b/handwritten/cloud-profiler/system-test/test-start.ts @@ -0,0 +1,163 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; +import delay from 'delay'; +import * as nock from 'nock'; +import {promisify} from 'util'; +import * as zlib from 'zlib'; + +import {perftools} from 'pprof/proto/profile'; +import {RequestProfile} from '../src/profiler'; + +const API = 'https://cloudprofiler.googleapis.com/v2'; +let savedEnv: {}; +let uploadedProfiles: RequestProfile[] = new Array(); +let createProfileCount = 0; +nock.disableNetConnect(); + +// eslint-disable-next-line @typescript-eslint/no-var-requires +const fakeCredentials = require('../../test/fixtures/gcloud-credentials.json'); + +// Start profiler and collect profiles before testing. +before(async () => { + savedEnv = process.env; + process.env = {}; + + process.env.GCLOUD_PROJECT = 'test-projectId'; + process.env.GAE_SERVICE = 'test-service'; + process.env.GAE_VERSION = '0.0.0'; + + // Mock profiler API. 
+ nock(API) + .persist() + .post('/projects/' + process.env.GCLOUD_PROJECT + '/profiles') + .delay(1000) + .reply(200, (): RequestProfile => { + let prof; + if (createProfileCount % 2 === 0) { + prof = { + name: 'projects/X/test-projectId', + profileType: 'WALL', + duration: '10s', + }; + } else { + prof = { + name: 'projects/X/test-projectId', + profileType: 'HEAP', + duration: '10s', + }; + } + createProfileCount++; + return prof; + }); + const tempUploadedProfiles = new Array(); + nock(API) + .persist() + .patch('/projects/X/test-projectId') + .reply(200, (_: RequestProfile, body: RequestProfile) => { + if (typeof body === 'string') { + body = JSON.parse(body); + } + tempUploadedProfiles.push(body); + }); + nock('https://oauth2.googleapis.com') + .post(/\/token/, () => true) + .once() + .reply(200, { + refresh_token: 'hello', + access_token: 'goodbye', + expiry_date: new Date(9999, 1, 1), + }); + + // start profiling and wait to collect profiles. + // eslint-disable-next-line @typescript-eslint/no-var-requires + const profiler = require('../src/index'); + profiler.start({credentials: fakeCredentials}); + await delay(30 * 1000); + + // copy over currently uploaded profiles, so all tests look at same profiles. + uploadedProfiles = tempUploadedProfiles.slice(); + + // Restore environment variables and mocks. + process.env = savedEnv; +}); + +// Restore environment variables after tests. +// nock not restored, since profiles still being uploaded. +after(() => { + process.env = savedEnv; +}); + +describe('start', () => { + it('should have uploaded multiple profiles', () => { + nock.restore(); + assert.ok( + uploadedProfiles.length >= 2, + 'Expected 2 or more profiles to be uploaded' + ); + }); + it('should have uploaded wall profile with samples first', async () => { + const wall = uploadedProfiles[0]; + const decodedBytes = Buffer.from(wall.profileBytes as string, 'base64'); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + assert.strictEqual(wall.profileType, 'WALL'); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[0].type as number], + 'sample' + ); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[1].type as number], + 'wall' + ); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[0].unit as number], + 'count' + ); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[1].unit as number], + 'microseconds' + ); + assert.ok(outProfile.sample.length > 0, 'Expected 1 or more samples'); + }); + it('should have uploaded heap profile second', async () => { + const heap = uploadedProfiles[1]; + const decodedBytes = Buffer.from(heap.profileBytes as string, 'base64'); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + assert.strictEqual(heap.profileType, 'HEAP'); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[0].type as number], + 'objects' + ); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[1].type as number], + 'space' + ); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[0].unit as number], + 'count' + ); + assert.strictEqual( + outProfile.stringTable[outProfile.sampleType[1].unit as number], + 'bytes' + ); + }); +}); diff --git a/handwritten/cloud-profiler/test/fixtures/gcloud-credentials.json 
b/handwritten/cloud-profiler/test/fixtures/gcloud-credentials.json new file mode 100644 index 00000000000..3499fcc9c3d --- /dev/null +++ b/handwritten/cloud-profiler/test/fixtures/gcloud-credentials.json @@ -0,0 +1,6 @@ +{ + "client_id": "x", + "client_secret": "y", + "refresh_token": "z", + "type": "authorized_user" +} diff --git a/handwritten/cloud-profiler/test/fixtures/test-config.json b/handwritten/cloud-profiler/test/fixtures/test-config.json new file mode 100644 index 00000000000..6bd7cc22de9 --- /dev/null +++ b/handwritten/cloud-profiler/test/fixtures/test-config.json @@ -0,0 +1,12 @@ +{ + "logLevel": 3, + "serviceContext": { + "version": "env_config_version", + "service": "env_config_service" + }, + "disableHeap": true, + "disableTime": true, + "instance": "env_config_instance", + "zone": "env_config_zone", + "projectId": "env_config_fake-projectId" +} diff --git a/handwritten/cloud-profiler/test/profiles-for-tests.ts b/handwritten/cloud-profiler/test/profiles-for-tests.ts new file mode 100644 index 00000000000..6d9e546a305 --- /dev/null +++ b/handwritten/cloud-profiler/test/profiles-for-tests.ts @@ -0,0 +1,1133 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as fs from 'fs'; +import * as path from 'path'; +import {SourceMapGenerator} from 'source-map'; +import * as tmp from 'tmp'; + +import {perftools} from 'pprof/proto/profile'; +import {TimeProfile} from '../src/v8-types'; + +const timeLeaf1 = { + name: 'function1', + scriptName: 'script1', + scriptId: 1, + lineNumber: 10, + columnNumber: 5, + hitCount: 1, + children: [], +}; + +const timeLeaf2 = { + name: 'function1', + scriptName: 'script2', + scriptId: 2, + lineNumber: 15, + columnNumber: 3, + hitCount: 2, + children: [], +}; + +const timeLeaf3 = { + name: 'function1', + scriptName: 'script1', + scriptId: 1, + lineNumber: 5, + columnNumber: 3, + hitCount: 1, + children: [], +}; + +const timeNode1 = { + name: 'function1', + scriptName: 'script1', + scriptId: 1, + lineNumber: 5, + columnNumber: 3, + hitCount: 3, + children: [timeLeaf1, timeLeaf2], +}; + +const timeNode2 = { + name: 'function2', + scriptName: 'script2', + scriptId: 2, + lineNumber: 1, + columnNumber: 5, + hitCount: 0, + children: [timeLeaf3], +}; + +const timeRoot = { + name: '(root)', + scriptName: 'root', + scriptId: 0, + lineNumber: 0, + columnNumber: 0, + hitCount: 0, + children: [timeNode1, timeNode2], +}; + +export const v8TimeProfile: TimeProfile = Object.freeze({ + startTime: 0, + endTime: 10 * 1000 * 1000, + topDownRoot: timeRoot, +}); + +const timeLines = [ + {functionId: 1, line: 1}, + {functionId: 2, line: 5}, + {functionId: 3, line: 15}, + {functionId: 2, line: 10}, +]; + +const timeFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), + new perftools.profiles.Function({ + id: 2, + name: 7, + systemName: 7, + filename: 8, + }), + new perftools.profiles.Function({ + id: 3, + name: 7, + systemName: 7, + filename: 6, + }), +]; + +const 
timeLocations = [ + new perftools.profiles.Location({ + line: [timeLines[0]], + id: 1, + }), + new perftools.profiles.Location({ + line: [timeLines[1]], + id: 2, + }), + new perftools.profiles.Location({ + line: [timeLines[2]], + id: 3, + }), + new perftools.profiles.Location({ + line: [timeLines[3]], + id: 4, + }), +]; + +export const timeProfile: perftools.profiles.IProfile = Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [2, 1], + value: [1, 1000], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [2], + value: [3, 3000], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [3, 2], + value: [2, 2000], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [4, 2], + value: [1, 1000], + label: [], + }), + ], + location: timeLocations, + function: timeFunctions, + stringTable: [ + '', + 'sample', + 'count', + 'wall', + 'microseconds', + 'function2', + 'script2', + 'function1', + 'script1', + ], + timeNanos: 0, + durationNanos: 10 * 1000 * 1000 * 1000, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 1000, +}); + +// timeProfile is encoded then decoded to convert numbers to longs, in +// decodedTimeProfile +const encodedTimeProfile = + perftools.profiles.Profile.encode(timeProfile).finish(); +export const decodedTimeProfile = Object.freeze( + perftools.profiles.Profile.decode(encodedTimeProfile) +); + +const heapLeaf1 = { + name: 'function2', + scriptName: 'script1', + scriptId: 1, + lineNumber: 8, + columnNumber: 5, + allocations: [{count: 5, sizeBytes: 1024}], + children: [], +}; + +const heapLeaf2 = { + name: 'function3', + scriptName: 'script1', + scriptId: 1, + lineNumber: 10, + columnNumber: 5, + allocations: [ + {count: 8, sizeBytes: 10}, + {count: 15, sizeBytes: 72}, + ], + children: [], +}; + +const heapNode2 = { + name: 'function1', + scriptName: 'script1', + scriptId: 1, + lineNumber: 5, + columnNumber: 5, + allocations: [], + children: [heapLeaf1, heapLeaf2], +}; + +const heapNode1 = { + name: 'main', + scriptName: 'main', + scriptId: 0, + lineNumber: 1, + columnNumber: 5, + allocations: [ + {count: 1, sizeBytes: 5}, + {count: 3, sizeBytes: 7}, + ], + children: [heapNode2], +}; + +export const v8HeapProfile = Object.freeze({ + name: '(root)', + scriptName: '(root)', + scriptId: 10000, + lineNumber: 0, + columnNumber: 5, + allocations: [], + children: [heapNode1], +}); + +const heapLines = [ + {functionId: 1, line: 1}, + {functionId: 2, line: 5}, + {functionId: 3, line: 10}, + {functionId: 4, line: 8}, +]; + +const heapFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 5, + }), + new perftools.profiles.Function({ + id: 2, + name: 6, + systemName: 6, + filename: 7, + }), + new perftools.profiles.Function({ + id: 3, + name: 8, + systemName: 8, + filename: 7, + }), + new perftools.profiles.Function({ + id: 4, + name: 9, + systemName: 9, + filename: 7, + }), +]; + +const heapLocations = [ + new perftools.profiles.Location({line: [heapLines[0]], id: 1}), + new perftools.profiles.Location({line: [heapLines[1]], id: 2}), + new perftools.profiles.Location({line: [heapLines[2]], id: 3}), + new perftools.profiles.Location({line: [heapLines[3]], id: 4}), +]; + +export const heapProfile: perftools.profiles.IProfile = Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new 
perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [1], + value: [1, 5], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [1], + value: [3, 21], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [3, 2, 1], + value: [8, 80], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [3, 2, 1], + value: [15, 15 * 72], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [4, 2, 1], + value: [5, 5 * 1024], + label: [], + }), + ], + location: heapLocations, + function: heapFunctions, + stringTable: [ + '', + 'objects', + 'count', + 'space', + 'bytes', + 'main', + 'function1', + 'script1', + 'function3', + 'function2', + ], + timeNanos: 0, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 524288, +}); + +// heapProfile is encoded then decoded to convert numbers to longs, in +// decodedHeapProfile +const encodedHeapProfile = + perftools.profiles.Profile.encode(heapProfile).finish(); +export const decodedHeapProfile = Object.freeze( + perftools.profiles.Profile.decode(encodedHeapProfile) +); + +const heapLinesWithExternal = [ + {functionId: 1}, + {functionId: 2, line: 1}, + {functionId: 3, line: 5}, + {functionId: 4, line: 10}, + {functionId: 5, line: 8}, +]; + +const heapFunctionsWithExternal = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 0, + }), + new perftools.profiles.Function({ + id: 2, + name: 6, + systemName: 6, + filename: 6, + }), + new perftools.profiles.Function({ + id: 3, + name: 7, + systemName: 7, + filename: 8, + }), + new perftools.profiles.Function({ + id: 4, + name: 9, + systemName: 9, + filename: 8, + }), + new perftools.profiles.Function({ + id: 5, + name: 10, + systemName: 10, + filename: 8, + }), +]; + +const heapLocationsWithExternal = [ + new perftools.profiles.Location({line: [heapLinesWithExternal[0]], id: 1}), + new perftools.profiles.Location({line: [heapLinesWithExternal[1]], id: 2}), + new perftools.profiles.Location({line: [heapLinesWithExternal[2]], id: 3}), + new perftools.profiles.Location({line: [heapLinesWithExternal[3]], id: 4}), + new perftools.profiles.Location({line: [heapLinesWithExternal[4]], id: 5}), +]; + +export const heapProfileWithExternal: perftools.profiles.IProfile = + Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [1], + value: [1, 1024], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [2], + value: [1, 5], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [2], + value: [3, 21], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [4, 3, 2], + value: [8, 80], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [4, 3, 2], + value: [15, 15 * 72], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [5, 3, 2], + value: [5, 5 * 1024], + label: [], + }), + ], + location: heapLocationsWithExternal, + function: heapFunctionsWithExternal, + stringTable: [ + '', + 'objects', + 'count', + 'space', + 'bytes', + '(external)', + 'main', + 'function1', + 'script1', + 'function3', + 'function2', + ], + timeNanos: 0, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 524288, + }); + +// heapProfile is encoded then decoded to convert numbers to longs, in +// decodedHeapProfile +export const 
decodedHeapProfileWithExternal = Object.freeze( + perftools.profiles.Profile.decode(encodedHeapProfile) +); + +const anonymousHeapNode = { + scriptName: 'main', + scriptId: 0, + lineNumber: 1, + columnNumber: 5, + allocations: [{count: 1, sizeBytes: 5}], + children: [], +}; + +export const v8AnonymousFunctionHeapProfile = Object.freeze({ + name: '(root)', + scriptName: '(root)', + scriptId: 10000, + lineNumber: 0, + columnNumber: 5, + allocations: [], + children: [anonymousHeapNode], +}); + +const anonymousFunctionHeapLines = [{functionId: 1, line: 1}]; + +const anonymousFunctionHeapFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), +]; + +const anonymousFunctionHeapLocations = [ + new perftools.profiles.Location({ + line: [anonymousFunctionHeapLines[0]], + id: 1, + }), +]; + +export const anonymousFunctionHeapProfile: perftools.profiles.IProfile = + Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [1], + value: [1, 5], + label: [], + }), + ], + location: anonymousFunctionHeapLocations, + function: anonymousFunctionHeapFunctions, + stringTable: [ + '', + 'objects', + 'count', + 'space', + 'bytes', + '(anonymous)', + 'main', + ], + timeNanos: 0, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 524288, + }); + +const anonymousFunctionTimeNode = { + scriptName: 'main', + scriptId: 2, + lineNumber: 1, + columnNumber: 5, + hitCount: 1, + children: [], +}; + +const anonymousFunctionTimeRoot = { + name: '(root)', + scriptName: 'root', + scriptId: 0, + lineNumber: 0, + columnNumber: 0, + hitCount: 0, + children: [anonymousFunctionTimeNode], +}; + +export const v8AnonymousFunctionTimeProfile: TimeProfile = Object.freeze({ + startTime: 0, + endTime: 10 * 1000 * 1000, + topDownRoot: anonymousFunctionTimeRoot, +}); + +const anonymousFunctionTimeLines = [{functionId: 1, line: 1}]; + +const anonymousFunctionTimeFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), +]; + +const anonymousFunctionTimeLocations = [ + new perftools.profiles.Location({ + line: [anonymousFunctionTimeLines[0]], + id: 1, + }), +]; + +export const anonymousFunctionTimeProfile: perftools.profiles.IProfile = + Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [1], + value: [1, 1000], + label: [], + }), + ], + location: anonymousFunctionTimeLocations, + function: anonymousFunctionTimeFunctions, + stringTable: [ + '', + 'sample', + 'count', + 'wall', + 'microseconds', + '(anonymous)', + 'main', + ], + timeNanos: 0, + durationNanos: 10 * 1000 * 1000 * 1000, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 1000, + }); + +const heapWithPathLeaf1 = { + name: 'foo2', + scriptName: 'foo.ts', + scriptId: 0, + lineNumber: 3, + columnNumber: 3, + allocations: [{count: 1, sizeBytes: 2}], + children: [], +}; + +const heapWithPathLeaf2 = { + name: 'bar', + scriptName: '@google-cloud/profiler/profiler.ts', + scriptId: 1, + lineNumber: 10, + columnNumber: 5, + allocations: [{count: 2, sizeBytes: 2}], + children: [], +}; + +const heapWithPathLeaf3 = { + name: 'bar', + scriptName: 'bar.ts', + scriptId: 2, + lineNumber: 3, + columnNumber: 3, + allocations: [{count: 3, 
sizeBytes: 2}], + children: [], +}; + +const heapWithPathNode2 = { + name: 'baz', + scriptName: 'foo.ts', + scriptId: 0, + lineNumber: 1, + columnNumber: 5, + allocations: [], + children: [heapWithPathLeaf1, heapWithPathLeaf2], +}; + +const heapWithPathNode1 = { + name: 'foo1', + scriptName: 'node_modules/@google-cloud/profiler/profiler.ts', + scriptId: 3, + lineNumber: 2, + columnNumber: 5, + allocations: [], + children: [heapWithPathLeaf3], +}; + +export const v8HeapWithPathProfile = Object.freeze({ + name: '(root)', + scriptName: '(root)', + scriptId: 10000, + lineNumber: 0, + columnNumber: 5, + allocations: [], + children: [heapWithPathNode1, heapWithPathNode2], +}); + +const heapIncludePathFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), + new perftools.profiles.Function({ + id: 2, + name: 7, + systemName: 7, + filename: 8, + }), + new perftools.profiles.Function({ + id: 3, + name: 9, + systemName: 9, + filename: 6, + }), + new perftools.profiles.Function({ + id: 4, + name: 10, + systemName: 10, + filename: 11, + }), + new perftools.profiles.Function({ + id: 5, + name: 7, + systemName: 7, + filename: 12, + }), +]; + +const heapIncludePathLocations = [ + new perftools.profiles.Location({ + line: [{functionId: 1, line: 1}], + id: 1, + }), + new perftools.profiles.Location({ + line: [{functionId: 2, line: 10}], + id: 2, + }), + new perftools.profiles.Location({ + line: [{functionId: 3, line: 3}], + id: 3, + }), + new perftools.profiles.Location({ + line: [{functionId: 4, line: 2}], + id: 4, + }), + new perftools.profiles.Location({ + line: [{functionId: 5, line: 3}], + id: 5, + }), +]; + +export const heapProfileIncludePath: perftools.profiles.IProfile = + Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [2, 1], + value: [2, 4], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [3, 1], + value: [1, 2], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [5, 4], + value: [3, 6], + label: [], + }), + ], + location: heapIncludePathLocations, + function: heapIncludePathFunctions, + stringTable: [ + '', + 'objects', + 'count', + 'space', + 'bytes', + 'baz', + 'foo.ts', + 'bar', + '@google-cloud/profiler/profiler.ts', + 'foo2', + 'foo1', + 'node_modules/@google-cloud/profiler/profiler.ts', + 'bar.ts', + ], + timeNanos: 0, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 524288, + }); + +// heapProfile is encoded then decoded to convert numbers to longs, in +// decodedHeapProfile +const encodedHeapProfileIncludePath = perftools.profiles.Profile.encode( + heapProfileIncludePath +).finish(); +export const decodedHeapProfileIncludePath = Object.freeze( + perftools.profiles.Profile.decode(encodedHeapProfileIncludePath) +); + +const heapExcludePathFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), + new perftools.profiles.Function({ + id: 2, + name: 7, + systemName: 7, + filename: 6, + }), +]; + +const heapExcludePathLocations = [ + new perftools.profiles.Location({ + line: [{functionId: 1, line: 1}], + id: 1, + }), + new perftools.profiles.Location({ + line: [{functionId: 2, line: 3}], + id: 2, + }), +]; + +export const heapProfileExcludePath: perftools.profiles.IProfile = + Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new 
perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [2, 1], + value: [1, 2], + label: [], + }), + ], + location: heapExcludePathLocations, + function: heapExcludePathFunctions, + stringTable: [ + '', + 'objects', + 'count', + 'space', + 'bytes', + 'baz', + 'foo.ts', + 'foo2', + ], + timeNanos: 0, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 524288, + }); + +// heapProfile is encoded then decoded to convert numbers to longs, in +// decodedHeapProfile +const encodedHeapProfileExcludePath = perftools.profiles.Profile.encode( + heapProfileExcludePath +).finish(); +export const decodedHeapProfileExcludePath = Object.freeze( + perftools.profiles.Profile.decode(encodedHeapProfileExcludePath) +); + +const mapDir = tmp.dirSync(); +export const mapDirPath = mapDir.name; + +export const mapFoo = new SourceMapGenerator({file: 'foo.js'}); +mapFoo.addMapping({ + source: path.join(mapDirPath, 'foo.ts'), + name: 'foo1', + generated: {line: 1, column: 3}, + original: {line: 10, column: 0}, +}); +mapFoo.addMapping({ + source: path.join(mapDirPath, 'foo.ts'), + name: 'foo2', + generated: {line: 5, column: 5}, + original: {line: 20, column: 0}, +}); + +export const mapBaz = new SourceMapGenerator({file: 'baz.js'}); +mapBaz.addMapping({ + source: path.join(mapDirPath, 'baz.ts'), + name: 'baz', + generated: {line: 3, column: 0}, + original: {line: 5, column: 0}, +}); + +fs.writeFileSync(path.join(mapDirPath, 'foo.js.map'), mapFoo.toString()); +fs.writeFileSync(path.join(mapDirPath, 'baz.js.map'), mapBaz.toString()); + +const heapGeneratedLeaf1 = { + name: 'foo2', + scriptName: path.join(mapDirPath, 'foo.js'), + scriptId: 1, + lineNumber: 5, + columnNumber: 5, + allocations: [{count: 3, sizeBytes: 2}], + children: [], +}; + +const heapGeneratedLeaf2 = { + name: 'baz', + scriptName: path.join(mapDirPath, 'baz.js'), + scriptId: 3, + lineNumber: 3, + columnNumber: 0, + allocations: [{count: 5, sizeBytes: 5}], + children: [], +}; + +const heapGeneratedNode2 = { + name: 'bar', + scriptName: path.join(mapDirPath, 'bar.js'), + scriptId: 2, + lineNumber: 10, + columnNumber: 0, + allocations: [], + children: [heapGeneratedLeaf2], +}; + +const heapGeneratedNode1 = { + name: 'foo1', + scriptName: path.join(mapDirPath, 'foo.js'), + scriptId: 1, + lineNumber: 1, + columnNumber: 3, + allocations: [], + children: [heapGeneratedNode2, heapGeneratedLeaf1], +}; + +export const v8HeapGeneratedProfile = Object.freeze({ + name: '(root)', + scriptName: '(root)', + scriptId: 10000, + lineNumber: 0, + columnNumber: 0, + allocations: [], + children: [heapGeneratedNode1], +}); + +const heapSourceFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), + new perftools.profiles.Function({ + id: 2, + name: 7, + systemName: 7, + filename: 6, + }), + new perftools.profiles.Function({ + id: 3, + name: 8, + systemName: 8, + filename: 9, + }), + new perftools.profiles.Function({ + id: 4, + name: 10, + systemName: 10, + filename: 11, + }), +]; + +const heapSourceLocations = [ + new perftools.profiles.Location({ + line: [{functionId: 1, line: 10}], + id: 1, + }), + new perftools.profiles.Location({ + line: [{functionId: 2, line: 20}], + id: 2, + }), + new perftools.profiles.Location({ + line: [{functionId: 3, line: 10}], + id: 3, + }), + new perftools.profiles.Location({ + line: [{functionId: 4, line: 5}], + id: 4, + }), +]; + +export const heapSourceProfile: perftools.profiles.IProfile = 
Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [2, 1], + value: [3, 6], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [4, 3, 1], + value: [5, 25], + label: [], + }), + ], + location: heapSourceLocations, + function: heapSourceFunctions, + stringTable: [ + '', + 'objects', + 'count', + 'space', + 'bytes', + 'foo1', + path.join(mapDirPath, 'foo.ts'), + 'foo2', + 'bar', + path.join(mapDirPath, 'bar.js'), + 'baz', + path.join(mapDirPath, 'baz.ts'), + ], + timeNanos: 0, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 524288, +}); + +const timeGeneratedLeaf1 = { + name: 'foo', + scriptName: path.join(mapDirPath, 'foo.js'), + scriptId: 1, + lineNumber: 5, + columnNumber: 5, + hitCount: 5, + children: [], +}; + +const timeGeneratedLeaf2 = { + name: 'baz', + scriptName: path.join(mapDirPath, 'baz.js'), + scriptId: 3, + lineNumber: 3, + columnNumber: 0, + hitCount: 10, + children: [], +}; + +const timeGeneratedNode2 = { + name: 'bar', + scriptName: path.join(mapDirPath, 'bar.js'), + scriptId: 2, + lineNumber: 10, + columnNumber: 0, + children: [timeGeneratedLeaf2], +}; + +const timeGeneratedNode1 = { + name: 'foo1', + scriptName: path.join(mapDirPath, 'foo.js'), + scriptId: 1, + lineNumber: 1, + columnNumber: 3, + hitCount: 0, + children: [timeGeneratedNode2, timeGeneratedLeaf1], +}; + +export const timeGeneratedProfileRoot = Object.freeze({ + name: '(root)', + scriptName: '(root)', + scriptId: 10000, + lineNumber: 0, + columnNumber: 0, + hitCount: 0, + children: [timeGeneratedNode1], +}); + +export const v8TimeGeneratedProfile: TimeProfile = Object.freeze({ + startTime: 0, + endTime: 10 * 1000 * 1000, + topDownRoot: timeGeneratedProfileRoot, +}); + +const timeSourceFunctions = [ + new perftools.profiles.Function({ + id: 1, + name: 5, + systemName: 5, + filename: 6, + }), + new perftools.profiles.Function({ + id: 2, + name: 7, + systemName: 7, + filename: 6, + }), + new perftools.profiles.Function({ + id: 3, + name: 8, + systemName: 8, + filename: 9, + }), + new perftools.profiles.Function({ + id: 4, + name: 10, + systemName: 10, + filename: 11, + }), +]; + +const timeSourceLocations = [ + new perftools.profiles.Location({ + line: [{functionId: 1, line: 10}], + id: 1, + }), + new perftools.profiles.Location({ + line: [{functionId: 2, line: 20}], + id: 2, + }), + new perftools.profiles.Location({ + line: [{functionId: 3, line: 10}], + id: 3, + }), + new perftools.profiles.Location({ + line: [{functionId: 4, line: 5}], + id: 4, + }), +]; + +export const timeSourceProfile: perftools.profiles.IProfile = Object.freeze({ + sampleType: [ + new perftools.profiles.ValueType({type: 1, unit: 2}), + new perftools.profiles.ValueType({type: 3, unit: 4}), + ], + sample: [ + new perftools.profiles.Sample({ + locationId: [2, 1], + value: [5, 5000], + label: [], + }), + new perftools.profiles.Sample({ + locationId: [4, 3, 1], + value: [10, 10000], + label: [], + }), + ], + location: timeSourceLocations, + function: timeSourceFunctions, + stringTable: [ + '', + 'sample', + 'count', + 'wall', + 'microseconds', + 'foo1', + path.join(mapDirPath, 'foo.ts'), + 'foo2', + 'bar', + path.join(mapDirPath, 'bar.js'), + 'baz', + path.join(mapDirPath, 'baz.ts'), + ], + timeNanos: 0, + durationNanos: 10 * 1000 * 1000 * 1000, + periodType: new perftools.profiles.ValueType({type: 3, unit: 4}), + period: 1000, +}); diff --git 
a/handwritten/cloud-profiler/test/test-init-config.ts b/handwritten/cloud-profiler/test/test-init-config.ts new file mode 100644 index 00000000000..56f47adf097 --- /dev/null +++ b/handwritten/cloud-profiler/test/test-init-config.ts @@ -0,0 +1,555 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as assert from 'assert'; +import {describe, it, before, beforeEach, afterEach, after} from 'mocha'; +import * as gcpMetadata from 'gcp-metadata'; +import {heap as heapProfiler} from 'pprof'; +import * as sinon from 'sinon'; + +import {createProfiler, nodeVersionOkay} from '../src/index'; +import {Profiler} from '../src/profiler'; +const packageJson = require('../../package.json'); + +describe('nodeVersionOkay', () => { + const version = parseInt(packageJson.engines.node.replace('>=', '')); + it('should accept alpha versions', () => { + assert.strictEqual(true, nodeVersionOkay(`v${version}.0.0-alpha.1`)); + }); + it('should accept beta versions', () => { + assert.strictEqual(true, nodeVersionOkay(`v${version}.9.10-beta.2`)); + }); + it('should accept nightly versions', () => { + assert.strictEqual( + true, + nodeVersionOkay(`v${version}.0.0-nightly2018000000`) + ); + }); + it('should accept pre-release versions', () => { + assert.strictEqual(true, nodeVersionOkay(`v${version}.0.0-pre`)); + }); + it('should accept v12.4.1', () => { + assert.strictEqual(true, nodeVersionOkay(`v${version}.4.1`)); + }); + it('should not accept v11.4.0', () => { + assert.strictEqual(false, nodeVersionOkay(`v${version - 1}.4.0`)); + }); +}); + +describe('createProfiler', () => { + let savedEnv: NodeJS.ProcessEnv; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let instanceMetadataStub: sinon.SinonStub; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let projectMetadataStub: sinon.SinonStub; + let startStub: sinon.SinonStub<[number, number], void>; + + const internalConfigParams = { + timeIntervalMicros: 1000, + heapIntervalBytes: 512 * 1024, + heapMaxStackDepth: 64, + ignoreHeapSamplesPath: '@google-cloud/profiler', + initialBackoffMillis: 1000 * 60, + backoffCapMillis: 60 * 60 * 1000, + backoffMultiplier: 1.3, + serverBackoffCapMillis: 2147483647, + localProfilingPeriodMillis: 1000, + localTimeDurationMillis: 1000, + localLogPeriodMillis: 10000, + apiEndpoint: 'cloudprofiler.googleapis.com', + }; + const disableSourceMapParams = { + sourceMapSearchPath: ['path'], + disableSourceMaps: true, + }; + let defaultConfig: {}; + + before(() => { + process.env = {}; + defaultConfig = internalConfigParams || {}; + startStub = sinon.stub(heapProfiler, 'start'); + savedEnv = process.env; + }); + + beforeEach(() => { + process.env = {}; + }); + + afterEach(() => { + if (instanceMetadataStub) { + instanceMetadataStub.restore(); + } + if (projectMetadataStub) { + projectMetadataStub.restore(); + } + heapProfiler.stop(); + startStub.reset(); + }); + + after(() => { + process.env = savedEnv; + startStub.restore(); + }); + + it('should not modify 
specified fields when not on GCE', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + + const config = Object.assign( + { + logLevel: 2, + serviceContext: {version: 'fake-version', service: 'fake-service'}, + disableHeap: true, + disableTime: true, + instance: 'instance', + zone: 'zone', + projectId: 'fake-projectId', + }, + disableSourceMapParams + ); + + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, defaultConfig, config); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should not modify specified fields when on metadata', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub + .withArgs('name') + .resolves('gce-instance') + .withArgs('zone') + .resolves('projects/123456789012/zones/gce-zone'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.withArgs('project-id').resolves('gce-project'); + + const config = Object.assign( + { + logLevel: 2, + serviceContext: {version: 'fake-version', service: 'fake-service'}, + disableHeap: true, + disableTime: true, + instance: 'instance', + zone: 'zone', + projectId: 'fake-projectId', + }, + disableSourceMapParams + ); + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, defaultConfig, config); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should get project ID, zone and instance from metadata', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub + .withArgs('name') + .resolves('gce-instance') + .withArgs('zone') + .resolves('projects/123456789012/zones/gce-zone'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.withArgs('project-id').resolves('gce-project'); + const config = Object.assign( + { + logLevel: 2, + serviceContext: {version: '', service: 'fake-service'}, + disableHeap: true, + disableTime: true, + }, + disableSourceMapParams + ); + const expConfigParams = { + projectId: 'gce-project', + logLevel: 2, + serviceContext: {version: '', service: 'fake-service'}, + disableHeap: true, + disableTime: true, + instance: 'gce-instance', + zone: 'gce-zone', + }; + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign( + {}, + defaultConfig, + disableSourceMapParams, + expConfigParams + ); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should not reject when not on GCE and no zone and instance found', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = Object.assign( + { + projectId: 'fake-projectId', + serviceContext: {service: 'fake-service'}, + }, + disableSourceMapParams + ); + const expConfigParams = { + logLevel: 2, + serviceContext: {service: 'fake-service'}, + disableHeap: false, + disableTime: false, + projectId: 'fake-projectId', + }; + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign( + {}, + defaultConfig, + disableSourceMapParams, + expConfigParams + ); + assert.deepStrictEqual(profiler.config, expConfig); + }); 
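+  // The remaining tests cover config validation (missing or invalid service,
+  // missing projectId, source map options) and environment-variable precedence.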
+ + it('should reject when no service specified', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = Object.assign( + { + logLevel: 2, + serviceContext: {version: ''}, + disableHeap: true, + disableTime: true, + }, + disableSourceMapParams + ); + await createProfiler(config) + .then(() => { + assert.fail('expected error because no service in config'); + }) + .catch((e: Error) => { + assert.strictEqual( + e.message, + 'Service must be specified in the configuration' + ); + }); + }); + + it('should reject when service does not match service regular expression', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = { + logLevel: 2, + serviceContext: {service: 'serviceName', version: ''}, + disableHeap: true, + disableTime: true, + }; + try { + await createProfiler(config); + assert.fail('expected an error because invalid service was specified'); + } catch (e) { + assert.strictEqual( + (e as Error).message, + 'Service serviceName does not match regular expression "/^[a-z0-9]([-a-z0-9_.]{0,253}[a-z0-9])?$/"' + ); + } + }); + + it('should reject when no projectId given', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = Object.assign( + { + logLevel: 2, + serviceContext: {version: '', service: 'fake-service'}, + disableHeap: true, + disableTime: true, + instance: 'instance', + zone: 'zone', + }, + disableSourceMapParams + ); + try { + await createProfiler(config); + assert.fail('expected an error because no projectId was specified'); + } catch (e) { + assert.strictEqual( + (e as Error).message, + 'Project ID must be specified in the configuration' + ); + } + }); + + it('should set sourceMapSearchPaths when specified in the config', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = Object.assign( + { + projectId: 'project', + logLevel: 2, + serviceContext: {version: '', service: 'fake-service'}, + disableHeap: true, + disableTime: true, + instance: 'instance', + zone: 'zone', + sourceMapSearchPath: ['path'], + }, + disableSourceMapParams + ); + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign( + {}, + config, + disableSourceMapParams, + defaultConfig + ); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should reject when sourceMapSearchPaths is empty array and source map support is enabled', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = { + serviceContext: {version: '', service:
'fake-service'}, + instance: 'instance', + zone: 'zone', + sourceMapSearchPath: [], + disableSourceMaps: false, + }; + + try { + await createProfiler(config); + assert.fail('expected an error because invalid service was specified'); + } catch (e) { + assert.strictEqual( + (e as Error).message, + 'serviceMapSearchPath is an empty array. Use disableSourceMaps ' + + 'to disable source map support instead.' + ); + } + }); + + it('should set apiEndpoint to non-default value', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + + const config = Object.assign( + { + projectId: 'project', + apiEndpoint: 'test-cloudprofiler.sandbox.googleapis.com', + serviceContext: {version: '', service: 'fake-service'}, + }, + disableSourceMapParams + ); + const expConfigParams = { + projectId: 'project', + serviceContext: {version: '', service: 'fake-service'}, + disableHeap: false, + disableTime: false, + logLevel: 2, + apiEndpoint: 'test-cloudprofiler.sandbox.googleapis.com', + }; + const expConfig = Object.assign( + {}, + defaultConfig, + disableSourceMapParams, + expConfigParams + ); + const profiler: Profiler = await createProfiler(config); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should get values from environment variable when not specified in config or environment variables', async () => { + process.env.GCLOUD_PROJECT = 'process-projectId'; + process.env.GCLOUD_PROFILER_LOGLEVEL = '4'; + process.env.GAE_SERVICE = 'process-service'; + process.env.GAE_VERSION = 'process-version'; + process.env.GCLOUD_PROFILER_CONFIG = './test/fixtures/test-config.json'; + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub + .withArgs('name') + .resolves('gce-instance') + .withArgs('zone') + .resolves('projects/123456789012/zones/gce-zone'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.withArgs('project-id').resolves('gce-project'); + + const config = disableSourceMapParams; + const expConfigParams = { + projectId: 'process-projectId', + logLevel: 4, + serviceContext: { + version: 'process-version', + service: 'process-service', + }, + disableHeap: true, + disableTime: true, + instance: 'env_config_instance', + zone: 'env_config_zone', + }; + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, config, defaultConfig, expConfigParams); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should get values from Knative environment variables when values not specified in config or other environment variables', async () => { + process.env.K_SERVICE = 'k-service'; + process.env.K_REVISION = 'k-version'; + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = Object.assign( + {projectId: 'project'}, + disableSourceMapParams + ); + const expConfigParams = { + serviceContext: {version: 'k-version', service: 'k-service'}, + disableHeap: false, + disableTime: false, + logLevel: 2, + }; + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, config, defaultConfig, expConfigParams); + assert.deepStrictEqual(profiler.config, 
expConfig); + }); + + it('should get values from GAE environment variables when both GAE and Knative environment variables are specified', async () => { + process.env.GAE_SERVICE = 'process-service'; + process.env.GAE_VERSION = 'process-version'; + process.env.K_SERVICE = 'k-service'; + process.env.K_REVISION = 'k-version'; + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + const config = Object.assign( + {projectId: 'project'}, + disableSourceMapParams + ); + const expConfigParams = { + serviceContext: { + version: 'process-version', + service: 'process-service', + }, + disableHeap: false, + disableTime: false, + logLevel: 2, + }; + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, config, defaultConfig, expConfigParams); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should not get values from environment variables when values specified in config', async () => { + process.env.GCLOUD_PROJECT = 'process-projectId'; + process.env.GCLOUD_PROFILER_LOGLEVEL = '4'; + process.env.GAE_SERVICE = 'process-service'; + process.env.GAE_VERSION = 'process-version'; + process.env.GCLOUD_PROFILER_CONFIG = './test/fixtures/test-config.json'; + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub + .withArgs('name') + .resolves('gce-instance') + .withArgs('zone') + .resolves('projects/123456789012/zones/gce-zone'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.withArgs('project-id').resolves('gce-project'); + + const config = Object.assign( + { + projectId: 'config-projectId', + logLevel: 1, + serviceContext: { + version: 'config-version', + service: 'config-service', + }, + disableHeap: false, + disableTime: false, + instance: 'instance', + zone: 'zone', + }, + disableSourceMapParams + ); + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, config, defaultConfig); + assert.deepStrictEqual(profiler.config, expConfig); + }); + + it('should get values from environment config when not specified in config or other environment variables', async () => { + instanceMetadataStub = sinon.stub(gcpMetadata, 'instance'); + instanceMetadataStub.throwsException('cannot access metadata'); + projectMetadataStub = sinon.stub(gcpMetadata, 'project'); + projectMetadataStub.throwsException('cannot access metadata'); + + process.env.GCLOUD_PROFILER_CONFIG = './test/fixtures/test-config.json'; + + const expConfigParams = { + logLevel: 3, + serviceContext: { + version: 'env_config_version', + service: 'env_config_service', + }, + disableHeap: true, + disableTime: true, + instance: 'env_config_instance', + zone: 'env_config_zone', + projectId: 'env_config_fake-projectId', + }; + + const config = disableSourceMapParams; + const profiler: Profiler = await createProfiler(config); + const expConfig = Object.assign({}, config, defaultConfig, expConfigParams); + assert.deepStrictEqual(profiler.config, expConfig); + }); + it('should start heap profiler when disableHeap is not set', async () => { + const config = Object.assign( + { + projectId: 'config-projectId', + serviceContext: {service: 'config-service'}, + instance: 'envConfig-instance', + zone: 'envConfig-zone', + }, + disableSourceMapParams + ); + await createProfiler(config); + assert.ok( +
startStub.calledWith(1024 * 512, 64), + 'expected heap profiler to be started' + ); + }); + it('should not start heap profiler when disableHeap is true', async () => { + const config = Object.assign( + { + projectId: 'config-projectId', + serviceContext: {service: 'config-service'}, + disableHeap: true, + instance: 'envConfig-instance', + zone: 'envConfig-zone', + }, + disableSourceMapParams + ); + await createProfiler(config); + assert.ok(!startStub.called, 'expected heap profiler to not be started'); + }); +}); diff --git a/handwritten/cloud-profiler/test/test-profiler.ts b/handwritten/cloud-profiler/test/test-profiler.ts new file mode 100644 index 00000000000..4d5d7954cba --- /dev/null +++ b/handwritten/cloud-profiler/test/test-profiler.ts @@ -0,0 +1,939 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as common from '@google-cloud/common'; +import { + BodyResponseCallback, + DecorateRequestOptions, +} from '@google-cloud/common'; +import * as assert from 'assert'; +import {describe, it, beforeEach, afterEach, before, after} from 'mocha'; +import * as extend from 'extend'; +import * as nock from 'nock'; +import {heap as heapProfiler, time as timeProfiler} from 'pprof'; +import * as sinon from 'sinon'; +import {promisify} from 'util'; +import * as zlib from 'zlib'; + +import {perftools} from 'pprof/proto/profile'; +import {ProfilerConfig} from '../src/config'; +import {Profiler, Retryer, BackoffResponseError} from '../src/profiler'; + +import { + decodedHeapProfile, + decodedTimeProfile, + heapProfile, + timeProfile, +} from './profiles-for-tests'; + +import * as ms from 'ms'; +// eslint-disable-next-line @typescript-eslint/no-var-requires +const fakeCredentials = require('../../test/fixtures/gcloud-credentials.json'); + +const API = 'cloudprofiler.googleapis.com'; +const TEST_API = 'test-cloudprofiler.sandbox.googleapis.com'; + +const FULL_API = `https://${API}/v2`; +const FULL_TEST_API = `https://${TEST_API}/v2`; + +const testConfig: ProfilerConfig = { + projectId: 'test-projectId', + logLevel: 0, + serviceContext: {service: 'test-service', version: 'test-version'}, + instance: 'test-instance', + zone: 'test-zone', + disableTime: false, + disableHeap: false, + credentials: fakeCredentials, + timeIntervalMicros: 1000, + heapIntervalBytes: 512 * 1024, + heapMaxStackDepth: 64, + ignoreHeapSamplesPath: '@google-cloud/profiler', + initialBackoffMillis: 1000, + backoffCapMillis: ms('1h')!, + backoffMultiplier: 1.3, + serverBackoffCapMillis: ms('7d')!, + localProfilingPeriodMillis: 1000, + localTimeDurationMillis: 1000, + localLogPeriodMillis: 1000, + sourceMapSearchPath: [], + disableSourceMaps: true, + apiEndpoint: API, +}; + +nock.disableNetConnect(); +function nockOauth2(): nock.Scope { + return nock('https://oauth2.googleapis.com') + .post(/\/token/, () => true) + .once() + .reply(200, { + refresh_token: 'hello', + access_token: 'goodbye', + expiry_date: new Date(9999, 1, 1), + }); +} + +describe('Retryer', () => { + it('should backoff
until max-backoff reached', () => { + const retryer = new Retryer(1000, 1000000, 5, () => 0.5); + assert.strictEqual(retryer.getBackoff(), 0.5 * 1000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 5000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 25000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 125000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 625000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 1000000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 1000000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 1000000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 1000000); + assert.strictEqual(retryer.getBackoff(), 0.5 * 1000000); + }); +}); + +describe('Profiler', () => { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const sinonStubs: Array> = []; + beforeEach(() => { + sinonStubs.push(sinon.stub(timeProfiler, 'start')); + sinonStubs.push( + sinon.stub(timeProfiler, 'profile').returns(Promise.resolve(timeProfile)) + ); + + sinonStubs.push(sinon.stub(heapProfiler, 'stop')); + sinonStubs.push(sinon.stub(heapProfiler, 'start')); + sinonStubs.push(sinon.stub(heapProfiler, 'profile').returns(heapProfile)); + }); + afterEach(() => { + nock.cleanAll(); + sinonStubs.forEach(stub => { + stub.restore(); + }); + }); + describe('profile', () => { + it('should return expected profile when profile type is WALL.', async () => { + const profiler = new Profiler(testConfig); + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {instance: 'test-instance'}, + }; + const prof = await profiler.profile(requestProf); + const decodedBytes = Buffer.from(prof.profileBytes as 'string', 'base64'); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + assert.deepStrictEqual(decodedTimeProfile, outProfile); + }); + it('should return expected profile when profile type is HEAP.', async () => { + const profiler = new Profiler(testConfig); + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'HEAP', + labels: {instance: 'test-instance'}, + }; + const prof = await profiler.profile(requestProf); + const decodedBytes = Buffer.from(prof.profileBytes as 'string', 'base64'); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + assert.deepStrictEqual(decodedHeapProfile, outProfile); + }); + it('should throw error when unexpected profile type is requested.', async () => { + const profiler = new Profiler(testConfig); + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'UNKNOWN', + duration: '10s', + labels: {instance: 'test-instance'}, + }; + try { + await profiler.profile(requestProf); + assert.fail('Expected an error to be thrown,'); + } catch (err) { + assert.strictEqual( + (err as Error).message, + 'Unexpected profile type UNKNOWN.' 
+ ); + } + }); + }); + describe('writeTimeProfile', () => { + it( + 'should return request with base64-encoded profile when time profiling' + + ' enabled', + async () => { + const profiler = new Profiler(testConfig); + + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {instance: 'test-instance'}, + }; + + const outRequestProfile = await profiler.writeTimeProfile(requestProf); + const encodedBytes = outRequestProfile.profileBytes; + + if (encodedBytes === undefined) { + assert.fail('profile bytes are undefined.'); + } + + const decodedBytes = Buffer.from(encodedBytes as string, 'base64'); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + + // compare to decodedTimeProfile, which is equivalent to timeProfile, + // but numbers are replaced with longs. + assert.deepStrictEqual(decodedTimeProfile, outProfile); + } + ); + it('should throw error when time profiling is not enabled.', async () => { + const config = extend(true, {}, testConfig); + config.disableTime = true; + const profiler = new Profiler(config); + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {instance: 'test-instance'}, + }; + try { + await profiler.writeTimeProfile(requestProf); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual( + (err as Error).message, + 'Cannot collect time profile, time profiler not enabled.' + ); + } + }); + }); + describe('writeHeapProfile', () => { + it( + 'should return request with base64-encoded profile when time profiling' + + ' enabled', + async () => { + const profiler = new Profiler(testConfig); + + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'HEAP', + labels: {instance: 'test-instance'}, + }; + + const outRequestProfile = await profiler.writeHeapProfile(requestProf); + const encodedBytes = outRequestProfile.profileBytes; + + if (encodedBytes === undefined) { + assert.fail('profile bytes are undefined.'); + } + + const decodedBytes = Buffer.from(encodedBytes as string, 'base64'); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + + // compare to decodedTimeProfile, which is equivalent to timeProfile, + // but numbers are replaced with longs. + assert.deepStrictEqual(decodedHeapProfile, outProfile); + } + ); + it('should throw error when heap profiling is not enabled.', async () => { + const config = extend(true, {}, testConfig); + config.disableHeap = true; + const profiler = new Profiler(config); + const requestProf = { + name: 'projects/12345678901/test-projectId', + profileType: 'HEAP', + labels: {instance: 'test-instance'}, + }; + try { + await profiler.writeHeapProfile(requestProf); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual( + (err as Error).message, + 'Cannot collect heap profile, heap profiler not enabled.' 
+ ); + } + }); + }); + describe('profileAndUpload', () => { + let requestStub: + | undefined + | sinon.SinonStub<[DecorateRequestOptions, BodyResponseCallback], void>; + afterEach(() => { + if (requestStub) { + requestStub.restore(); + } + }); + it('should send request to upload time profile.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'WALL', + labels: {instance: 'test-instance'}, + }; + + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, null, {}, {statusCode: 200}); + + const profiler = new Profiler(testConfig); + await profiler.profileAndUpload(requestProf); + + const uploaded = requestStub.firstCall.args[0].body as { + profileBytes?: string; + }; + const decodedBytes = Buffer.from( + uploaded.profileBytes as string, + 'base64' + ); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + assert.deepStrictEqual(decodedTimeProfile, outProfile); + + uploaded.profileBytes = undefined; + assert.deepStrictEqual(uploaded, requestProf); + }); + it('should send request to upload heap profile.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'HEAP', + labels: {instance: 'test-instance'}, + }; + + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, null, {}, {statusCode: 200}); + + const profiler = new Profiler(testConfig); + await profiler.profileAndUpload(requestProf); + const uploaded = requestStub.firstCall.args[0].body as { + profileBytes?: string; + }; + const decodedBytes = Buffer.from( + uploaded.profileBytes as string, + 'base64' + ); + const unzippedBytes = (await promisify(zlib.gunzip)( + decodedBytes + )) as Uint8Array; + const outProfile = perftools.profiles.Profile.decode(unzippedBytes); + assert.deepStrictEqual(decodedHeapProfile, outProfile); + + uploaded.profileBytes = undefined; + assert.deepStrictEqual(uploaded, requestProf); + }); + it('should not uploaded when profile type unknown.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'UNKNOWN_PROFILE_TYPE', + labels: {instance: 'test-instance'}, + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, null, {}, {}); + const profiler = new Profiler(testConfig); + await profiler.profileAndUpload(requestProf); + assert.strictEqual(0, requestStub.callCount); + }); + it('should ignore error thrown by http request.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'WALL', + labels: {instance: 'test-instance'}, + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .callsArgWith(1, new Error('Network error'), {}, {}); + const profiler = new Profiler(testConfig); + await profiler.profileAndUpload(requestProf); + }); + it('should ignore when non-200 status code returned.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'WALL', + labels: {instance: 'test-instance'}, + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .callsArgWith( + 1, + null, + {}, + {statusCode: 500, statusMessage: 'Error 500'} + ); + const profiler = new Profiler(testConfig); + await 
profiler.profileAndUpload(requestProf); + }); + it('should not retry on non-200 status codes', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'WALL', + labels: {instance: 'test-instance'}, + }; + nockOauth2(); + const apiMock = nock(FULL_API) + .patch('/' + requestProf.name) + .once() + .reply(500) + .patch('/' + requestProf.name) + .once() + .reply(200); + const profiler = new Profiler(testConfig); + await profiler.profileAndUpload(requestProf); + assert.strictEqual( + apiMock.isDone(), + false, + 'call to upload profile should not be retried' + ); + }); + it('should send request to upload profile to default API without error.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'HEAP', + labels: {instance: 'test-instance'}, + }; + nockOauth2(); + const apiMock = nock(FULL_API) + .patch('/' + requestProf.name) + .once() + .reply(200); + const profiler = new Profiler(testConfig); + await profiler.profileAndUpload(requestProf); + assert.strictEqual(apiMock.isDone(), true, 'completed call to real API'); + }); + it('should send request to upload profile to non-default API without error.', async () => { + const requestProf = { + name: 'projects/12345678901/test-projectId', + duration: '10s', + profileType: 'HEAP', + labels: {instance: 'test-instance'}, + }; + nockOauth2(); + const apiMock = nock(FULL_TEST_API) + .patch('/' + requestProf.name) + .once() + .reply(200); + const config = extend(true, {}, testConfig); + config.apiEndpoint = TEST_API; + const profiler = new Profiler(config); + await profiler.profileAndUpload(requestProf); + assert.strictEqual(apiMock.isDone(), true, 'completed call to test API'); + }); + }); + describe('createProfile', () => { + let requestStub: + | undefined + | sinon.SinonStub<[DecorateRequestOptions, BodyResponseCallback], void>; + afterEach(() => { + if (requestStub) { + requestStub.restore(); + } + }); + it('should successfully create wall profile', async () => { + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + deployment: { + labels: {version: 'test-version', language: 'nodejs'}, + projectId: 'test-projectId', + target: 'test-service', + }, + labels: {version: testConfig.serviceContext.version}, + }; + nockOauth2(); + const requestProfileMock = nock(FULL_API) + .post('/projects/' + testConfig.projectId + '/profiles') + .once() + .reply(200, response); + const profiler = new Profiler(testConfig); + const actualResponse = await profiler.createProfile(); + assert.deepStrictEqual(response, actualResponse); + assert.ok(requestProfileMock.isDone(), 'expected call to create profile'); + }); + it('should successfully create profile using non-default api', async () => { + const config = extend(true, {}, testConfig); + config.apiEndpoint = TEST_API; + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + deployment: { + labels: {version: 'test-version', language: 'nodejs'}, + projectId: 'test-projectId', + target: 'test-service', + }, + labels: {version: config.serviceContext.version}, + }; + nockOauth2(); + const requestProfileMock = nock(FULL_TEST_API) + .post('/projects/' + config.projectId + '/profiles') + .once() + .reply(200, response); + const profiler = new Profiler(config); + const actualResponse = await profiler.createProfile(); + assert.deepStrictEqual(response, actualResponse); + 
assert.ok(requestProfileMock.isDone(), 'expected call to create profile'); + }); + it('should successfully create heap profile', async () => { + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'HEAP', + deployment: { + labels: {version: 'test-version', language: 'nodejs'}, + projectId: 'test-projectId', + target: 'test-service', + }, + labels: {version: testConfig.serviceContext.version}, + }; + nockOauth2(); + const requestProfileMock = nock(FULL_API) + .post('/projects/' + testConfig.projectId + '/profiles') + .once() + .reply(200, response); + const profiler = new Profiler(testConfig); + const actualResponse = await profiler.createProfile(); + assert.deepStrictEqual(response, actualResponse); + assert.ok(requestProfileMock.isDone(), 'expected call to create profile'); + }); + it('should throw error when invalid profile created', async () => { + const response = {name: 'projects/12345678901/test-projectId'}; + nockOauth2(); + nock(FULL_API) + .post('/projects/' + testConfig.projectId + '/profiles') + .once() + .reply(200, response); + const profiler = new Profiler(testConfig); + try { + await profiler.createProfile(); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual( + (err as Error).message, + 'Profile not valid: ' + + '{"name":"projects/12345678901/test-projectId"}.' + ); + } + }); + it('should not retry on non-200 status codes', async () => { + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'HEAP', + deployment: { + labels: {version: 'test-version', language: 'nodejs'}, + projectId: 'test-projectId', + target: 'test-service', + }, + labels: {version: testConfig.serviceContext.version}, + }; + nockOauth2(); + nock(FULL_API) + .post('/projects/' + testConfig.projectId + '/profiles') + .once() + .reply(503, {}) + .post('/projects/' + testConfig.projectId + '/profiles') + .once() + .reply(200, response); + const profiler = new Profiler(testConfig); + try { + await profiler.createProfile(); + assert.fail('expected error, no error thrown'); + } catch (_) { + // 👻 + } + }); + it( + 'should not have instance and zone in request body when instance and' + + ' zone undefined', + async () => { + const config = extend(true, {}, testConfig); + config.instance = undefined; + config.zone = undefined; + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, response, {statusCode: 200}); + const expRequestBody = { + deployment: { + labels: {version: 'test-version', language: 'nodejs'}, + projectId: 'test-projectId', + target: 'test-service', + }, + profileType: ['WALL', 'HEAP'], + }; + const profiler = new Profiler(config); + const actualResponse = await profiler.createProfile(); + assert.deepStrictEqual(response, actualResponse); + assert.deepStrictEqual( + expRequestBody, + requestStub.firstCall.args[0].body + ); + } + ); + it( + 'should not have instance and zone in request body when instance and' + + ' zone empty strings', + async () => { + const config = extend(true, {}, testConfig); + config.instance = ''; + config.zone = ''; + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, response, {statusCode: 200}); + const expRequestBody = { + 
deployment: { + labels: {version: 'test-version', language: 'nodejs'}, + projectId: 'test-projectId', + target: 'test-service', + }, + profileType: ['WALL', 'HEAP'], + }; + const profiler = new Profiler(config); + const actualResponse = await profiler.createProfile(); + assert.deepStrictEqual(response, actualResponse); + assert.deepStrictEqual( + expRequestBody, + requestStub.firstCall.args[0].body + ); + } + ); + it('should keep additional fields in request profile.', async () => { + const response = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {version: testConfig.serviceContext.version}, + additionalField: 'additionalField', + }; + nockOauth2(); + nock(FULL_API) + .post('/projects/' + testConfig.projectId + '/profiles') + .once() + .reply(200, response); + const profiler = new Profiler(testConfig); + const actualResponse = await profiler.createProfile(); + assert.deepStrictEqual(response, actualResponse); + }); + it('should throw error when error thrown by http request.', async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, new Error('Network error'), undefined, undefined); + const profiler = new Profiler(testConfig); + try { + await profiler.createProfile(); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual((err as Error).message, 'Network error'); + } + }); + it('should throw status message when response has non-200 status.', async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, undefined, { + statusCode: 500, + statusMessage: '500 status code', + }); + + const profiler = new Profiler(testConfig); + try { + await profiler.createProfile(); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual((err as Error).message, '500 status code'); + } + }); + it( + 'should throw error with server-specified backoff when non-200 error' + + ' and backoff specified', + async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith( + 1, + undefined, + {error: {details: [{retryDelay: '50s'}]}}, + {statusCode: 409} + ); + + const profiler = new Profiler(testConfig); + try { + await profiler.createProfile(); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual( + (err as BackoffResponseError).backoffMillis, + 50000 + ); + } + } + ); + it('should throw error when response undefined', async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, undefined, {status: 200}); + + const profiler = new Profiler(testConfig); + try { + await profiler.createProfile(); + assert.fail('expected error, no error thrown'); + } catch (err) { + assert.strictEqual( + (err as Error).message, + 'Profile not valid: undefined.' 
+ ); + } + }); + }); + describe('collectProfile', () => { + let requestStub: + | undefined + | sinon.SinonStub<[DecorateRequestOptions, BodyResponseCallback], void>; + let randomStub: sinon.SinonStub<[], number> | undefined; + + const RANDOM_VALUE = 0.5; + // Retryer calculates expected backoff as RANDOM_VALUE * testConfig.initialBackoffMillis => 0.5 * 1000 = 500 + const EXPECTED_BACKOFF = 500; + before(() => { + randomStub = sinon.stub(Math, 'random').returns(RANDOM_VALUE); + }); + afterEach(() => { + if (requestStub) { + requestStub.restore(); + } + }); + after(() => { + if (randomStub) { + randomStub.restore(); + } + }); + it('should indicate collectProfile should be called immediately when no errors', async () => { + const requestProfileResponseBody = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {version: testConfig.serviceContext.version}, + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, requestProfileResponseBody, { + statusCode: 200, + }) + .onCall(1) + .callsArgWith(1, undefined, undefined, {statusCode: 200}); + + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.strictEqual( + 0, + delayMillis, + 'No delay before asking to collect next profile' + ); + }); + it( + 'should return expected backoff when non-200 response and no backoff' + + ' indicated', + async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, undefined, {statusCode: 404}); + + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.deepStrictEqual(EXPECTED_BACKOFF, delayMillis); + } + ); + it('should reset backoff after success', async () => { + const createProfileResponseBody = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {instance: testConfig.instance}, + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + // createProfile - first failure + .onCall(0) + .callsArgWith(1, undefined, undefined, {statusCode: 404}) + // createProfile - second failure + .onCall(1) + .callsArgWith(1, undefined, undefined, {statusCode: 404}) + // createProfile - third failure + .onCall(2) + .callsArgWith(1, undefined, undefined, {statusCode: 404}) + // createProfile - success + .onCall(3) + .callsArgWith(1, undefined, createProfileResponseBody, { + statusCode: 200, + }) + // upload profile - success + .onCall(4) + .callsArgWith(1, undefined, undefined, {statusCode: 200}) + // createProfile - failure + .onCall(5) + .callsArgWith( + 1, + new Error('error creating profile'), + undefined, + undefined + ); + const profiler = new Profiler(testConfig); + let delayMillis = await profiler.collectProfile(); + assert.deepStrictEqual(500, delayMillis); + delayMillis = await profiler.collectProfile(); + assert.deepStrictEqual(650, delayMillis); + delayMillis = await profiler.collectProfile(); + assert.deepStrictEqual(845, delayMillis); + delayMillis = await profiler.collectProfile(); + assert.deepStrictEqual(0, delayMillis); + delayMillis = await profiler.collectProfile(); + assert.deepStrictEqual(500, delayMillis); + }); + it( + 'should return server-specified backoff when non-200 error and backoff' + + ' specified', + async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith( + 1, + undefined, +
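// response body supplies the server-specified backoff via a retryDelay duration in error.details ('50s' -> 50000 ms) +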
{error: {details: [{retryDelay: '50s'}]}}, + {statusCode: 409} + ); + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.strictEqual(50000, delayMillis); + } + ); + it( + 'should return expected backoff when non-200 error and invalid server backoff' + + ' specified', + async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith( + 1, + undefined, + {message: 'some message'}, + { + statusCode: 409, + body: {message: 'some message'}, + } + ); + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.strictEqual(EXPECTED_BACKOFF, delayMillis); + } + ); + it( + 'should return expected backoff when non-200 error and invalid server backoff' + + ' string specified', + async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith( + 1, + undefined, + {error: {details: [{retryDelay: 'not a duration'}]}}, + {statusCode: 409} + ); + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.strictEqual(EXPECTED_BACKOFF, delayMillis); + } + ); + it( + 'should return backoff limit when server-specified backoff is greater' + + ' than backoff limit', + async () => { + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith( + 1, + undefined, + {error: {details: [{retryDelay: '1000h'}]}}, + {statusCode: 409} + ); + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.strictEqual(ms('7d'), delayMillis); + } + ); + it( + 'should indicate collectProfile should be called immediately if there' + + ' is an error when collecting and uploading profile.', + async () => { + const createProfileResponseBody = { + name: 'projects/12345678901/test-projectId', + profileType: 'WALL', + duration: '10s', + labels: {instance: testConfig.instance}, + }; + requestStub = sinon + .stub(common.ServiceObject.prototype, 'request') + .onCall(0) + .callsArgWith(1, undefined, createProfileResponseBody, { + statusCode: 200, + }) + .onCall(1) + .callsArgWith(1, new Error('Error uploading'), undefined, undefined); + + const profiler = new Profiler(testConfig); + const delayMillis = await profiler.collectProfile(); + assert.strictEqual(0, delayMillis); + } + ); + }); +}); diff --git a/handwritten/cloud-profiler/tsconfig.json b/handwritten/cloud-profiler/tsconfig.json new file mode 100644 index 00000000000..c7d0241de02 --- /dev/null +++ b/handwritten/cloud-profiler/tsconfig.json @@ -0,0 +1,14 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "moduleResolution": "node", + "resolveJsonModule": true + }, + "include": [ + "src/*.ts", + "test/*.ts", + "system-test/*.ts" + ] +} diff --git a/release-please-config.json b/release-please-config.json index 9c211610789..883fa668b33 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -1,6 +1,8 @@ { + "bump-minor-pre-major": true, "initial-version": "0.1.0", "packages": { + "handwritten/cloud-profiler": {}, "packages/gapic-node-processing": {}, "packages/google-ads-admanager": {}, "packages/google-ads-datamanager": {}, @@ -149,11 +151,11 @@ "packages/google-cloud-saasplatform-saasservicemgmt": {}, "packages/google-cloud-scheduler": {}, "packages/google-cloud-secretmanager": {}, + "packages/google-cloud-securesourcemanager": {},
"packages/google-cloud-security-privateca": {}, "packages/google-cloud-security-publicca": {}, "packages/google-cloud-securitycenter": {}, "packages/google-cloud-securitycentermanagement": {}, - "packages/google-cloud-securesourcemanager": {}, "packages/google-cloud-servicedirectory": {}, "packages/google-cloud-servicehealth": {}, "packages/google-cloud-shell": {}, @@ -227,6 +229,5 @@ "type": "sentence-case" } ], - "bump-minor-pre-major": true, "release-type": "node" -} \ No newline at end of file +}